var/home/core/zuul-output/logs/kubelet.log

Oct 13 06:46:31 crc systemd[1]: Starting Kubernetes Kubelet...
Oct 13 06:46:31 crc restorecon[4575]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 13 06:46:31 crc restorecon[4575]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc 
restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 13 06:46:31 crc 
restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc 
restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc 
restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 
crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 
06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 13 
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:31 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 
06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc 
restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 13 06:46:32 crc restorecon[4575]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Oct 13 06:46:32 crc kubenswrapper[4664]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 13 06:46:32 crc kubenswrapper[4664]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Oct 13 06:46:32 crc kubenswrapper[4664]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 13 06:46:32 crc kubenswrapper[4664]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801442 4664 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801473 4664 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801484 4664 feature_gate.go:330] unrecognized feature gate: Example
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801497 4664 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801507 4664 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801518 4664 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801528 4664 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801536 4664 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801545 4664 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801553 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801562 4664 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801570 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801581 4664 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801592 4664 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801601 4664 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801626 4664 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801636 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801645 4664 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801655 4664 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801663 4664 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801672 4664 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801680 4664 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801688 4664 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801696 4664 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801705 4664 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801713 4664 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801722 4664 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801731 4664 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801739 4664 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801747 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801755 4664 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801764 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801772 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801780 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801788 4664 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801826 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801834 4664 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801843 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801851 4664 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801860 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801869 4664 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801877 4664 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801886 4664 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801899 4664 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801909 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801919 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801928 4664 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801938 4664 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801946 4664 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801957 4664 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801967 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801977 4664 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801986 4664 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.801995 4664 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802003 4664 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802012 4664 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802024 4664 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802033 4664 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802042 4664 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802050 4664 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802058 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802066 4664 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802075 4664 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802083 4664 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802092 4664 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802100 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802108 4664 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802118 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802127 4664 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802135 4664 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.802144 4664 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
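The kubelet re-parses its feature-gate map several times during startup, so this same set of OpenShift-specific gates (unknown to the upstream kubelet) is warned about again further down. A short sketch for collapsing the noise, keyed off the feature_gate.go:330 format visible above; the file name kubelet.log is an assumption:

import re
from collections import Counter

# Matches e.g. "... feature_gate.go:330] unrecognized feature gate: NewOLM"
UNRECOGNIZED = re.compile(r"feature_gate\.go:330\] unrecognized feature gate: (\S+)")

def count_unrecognized_gates(log_path):
    # Count how often each unknown gate is warned about across the log.
    counts = Counter()
    with open(log_path) as f:
        for line in f:
            counts.update(UNRECOGNIZED.findall(line))
    return counts

# Example: print the five most-repeated warnings.
# for gate, n in count_unrecognized_gates("kubelet.log").most_common(5):
#     print(gate, n)
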
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802315 4664 flags.go:64] FLAG: --address="0.0.0.0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802333 4664 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802350 4664 flags.go:64] FLAG: --anonymous-auth="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802363 4664 flags.go:64] FLAG: --application-metrics-count-limit="100"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802375 4664 flags.go:64] FLAG: --authentication-token-webhook="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802386 4664 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802399 4664 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802411 4664 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802423 4664 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802433 4664 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802443 4664 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802453 4664 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802463 4664 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802473 4664 flags.go:64] FLAG: --cgroup-root=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802483 4664 flags.go:64] FLAG: --cgroups-per-qos="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802493 4664 flags.go:64] FLAG: --client-ca-file=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802503 4664 flags.go:64] FLAG: --cloud-config=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802514 4664 flags.go:64] FLAG: --cloud-provider=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802524 4664 flags.go:64] FLAG: --cluster-dns="[]"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802535 4664 flags.go:64] FLAG: --cluster-domain=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802544 4664 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802554 4664 flags.go:64] FLAG: --config-dir=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802563 4664 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802574 4664 flags.go:64] FLAG: --container-log-max-files="5"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802587 4664 flags.go:64] FLAG: --container-log-max-size="10Mi"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802596 4664 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802606 4664 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802616 4664 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802626 4664 flags.go:64] FLAG: --contention-profiling="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802636 4664 flags.go:64] FLAG: --cpu-cfs-quota="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802645 4664 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802655 4664 flags.go:64] FLAG: --cpu-manager-policy="none"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802665 4664 flags.go:64] FLAG: --cpu-manager-policy-options=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802676 4664 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802687 4664 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802696 4664 flags.go:64] FLAG: --enable-debugging-handlers="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802706 4664 flags.go:64] FLAG: --enable-load-reader="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802716 4664 flags.go:64] FLAG: --enable-server="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802725 4664 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802739 4664 flags.go:64] FLAG: --event-burst="100"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802749 4664 flags.go:64] FLAG: --event-qps="50"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802758 4664 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802769 4664 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802779 4664 flags.go:64] FLAG: --eviction-hard=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802791 4664 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802826 4664 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802836 4664 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802846 4664 flags.go:64] FLAG: --eviction-soft=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802857 4664 flags.go:64] FLAG: --eviction-soft-grace-period=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802867 4664 flags.go:64] FLAG: --exit-on-lock-contention="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802877 4664 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802888 4664 flags.go:64] FLAG: --experimental-mounter-path=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802899 4664 flags.go:64] FLAG: --fail-cgroupv1="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802909 4664 flags.go:64] FLAG: --fail-swap-on="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802919 4664 flags.go:64] FLAG: --feature-gates=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802932 4664 flags.go:64] FLAG: --file-check-frequency="20s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802942 4664 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802952 4664 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802962 4664 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802972 4664 flags.go:64] FLAG: --healthz-port="10248"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.802990 4664 flags.go:64] FLAG: --help="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803000 4664 flags.go:64] FLAG: --hostname-override=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803009 4664 flags.go:64] FLAG: --housekeeping-interval="10s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803019 4664 flags.go:64] FLAG: --http-check-frequency="20s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803029 4664 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803039 4664 flags.go:64] FLAG: --image-credential-provider-config=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803048 4664 flags.go:64] FLAG: --image-gc-high-threshold="85"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803057 4664 flags.go:64] FLAG: --image-gc-low-threshold="80"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803067 4664 flags.go:64] FLAG: --image-service-endpoint=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803076 4664 flags.go:64] FLAG: --kernel-memcg-notification="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803086 4664 flags.go:64] FLAG: --kube-api-burst="100"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803116 4664 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803126 4664 flags.go:64] FLAG: --kube-api-qps="50"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803136 4664 flags.go:64] FLAG: --kube-reserved=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803145 4664 flags.go:64] FLAG: --kube-reserved-cgroup=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803155 4664 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803165 4664 flags.go:64] FLAG: --kubelet-cgroups=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803175 4664 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803185 4664 flags.go:64] FLAG: --lock-file=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803194 4664 flags.go:64] FLAG: --log-cadvisor-usage="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803204 4664 flags.go:64] FLAG: --log-flush-frequency="5s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803214 4664 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803228 4664 flags.go:64] FLAG: --log-json-split-stream="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803238 4664 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803248 4664 flags.go:64] FLAG: --log-text-split-stream="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803257 4664 flags.go:64] FLAG: --logging-format="text"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803267 4664 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803277 4664 flags.go:64] FLAG: --make-iptables-util-chains="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803288 4664 flags.go:64] FLAG: --manifest-url=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803298 4664 flags.go:64] FLAG: --manifest-url-header=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803310 4664 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803321 4664 flags.go:64] FLAG: --max-open-files="1000000"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803336 4664 flags.go:64] FLAG: --max-pods="110"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803346 4664 flags.go:64] FLAG: --maximum-dead-containers="-1"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803356 4664 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803366 4664 flags.go:64] FLAG: --memory-manager-policy="None"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803376 4664 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803386 4664 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803396 4664 flags.go:64] FLAG: --node-ip="192.168.126.11"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803406 4664 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803426 4664 flags.go:64] FLAG: --node-status-max-images="50"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803436 4664 flags.go:64] FLAG: --node-status-update-frequency="10s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803446 4664 flags.go:64] FLAG: --oom-score-adj="-999"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803456 4664 flags.go:64] FLAG: --pod-cidr=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803466 4664 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803480 4664 flags.go:64] FLAG: --pod-manifest-path=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803490 4664 flags.go:64] FLAG: --pod-max-pids="-1"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803501 4664 flags.go:64] FLAG: --pods-per-core="0"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803510 4664 flags.go:64] FLAG: --port="10250"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803520 4664 flags.go:64] FLAG: --protect-kernel-defaults="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803530 4664 flags.go:64] FLAG: --provider-id=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803540 4664 flags.go:64] FLAG: --qos-reserved=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803550 4664 flags.go:64] FLAG: --read-only-port="10255"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803560 4664 flags.go:64] FLAG: --register-node="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803569 4664 flags.go:64] FLAG: --register-schedulable="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803579 4664 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803596 4664 flags.go:64] FLAG: --registry-burst="10"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803606 4664 flags.go:64] FLAG: --registry-qps="5"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803615 4664 flags.go:64] FLAG: --reserved-cpus=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803625 4664 flags.go:64] FLAG: --reserved-memory=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803637 4664 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803646 4664 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803657 4664 flags.go:64] FLAG: --rotate-certificates="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803668 4664 flags.go:64] FLAG: --rotate-server-certificates="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803682 4664 flags.go:64] FLAG: --runonce="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803692 4664 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803702 4664 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803712 4664 flags.go:64] FLAG: --seccomp-default="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803722 4664 flags.go:64] FLAG: --serialize-image-pulls="true"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803732 4664 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803742 4664 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803752 4664 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803762 4664 flags.go:64] FLAG: --storage-driver-password="root"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803772 4664 flags.go:64] FLAG: --storage-driver-secure="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803782 4664 flags.go:64] FLAG: --storage-driver-table="stats"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803791 4664 flags.go:64] FLAG: --storage-driver-user="root"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803828 4664 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803839 4664 flags.go:64] FLAG: --sync-frequency="1m0s"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803849 4664 flags.go:64] FLAG: --system-cgroups=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803858 4664 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803873 4664 flags.go:64] FLAG: --system-reserved-cgroup=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803883 4664 flags.go:64] FLAG: --tls-cert-file=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803896 4664 flags.go:64] FLAG: --tls-cipher-suites="[]"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803908 4664 flags.go:64] FLAG: --tls-min-version=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803918 4664 flags.go:64] FLAG: --tls-private-key-file=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803928 4664 flags.go:64] FLAG: --topology-manager-policy="none"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803937 4664 flags.go:64] FLAG: --topology-manager-policy-options=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803947 4664 flags.go:64] FLAG: --topology-manager-scope="container"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803956 4664 flags.go:64] FLAG: --v="2"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803969 4664 flags.go:64] FLAG: --version="false"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803982 4664 flags.go:64] FLAG: --vmodule=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.803993 4664 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.804003 4664 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
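The flags.go:64 dump above is the kubelet's complete effective flag set, one FLAG: --name="value" pair per entry. If you need it as structured data (for diffing two boots, say), a regex sketch along these lines works against the format shown; the path kubelet.log is again an assumption:

import re

# Matches e.g. '... flags.go:64] FLAG: --address="0.0.0.0"'
FLAG_LINE = re.compile(r'flags\.go:64\] FLAG: --([^=]+)="(.*?)"')

def parse_flag_dump(log_path):
    # Collect the kubelet's effective CLI flags into a dict.
    flags = {}
    with open(log_path) as f:
        for line in f:
            for name, value in FLAG_LINE.findall(line):
                flags[name] = value
    return flags

# Example: parse_flag_dump("kubelet.log")["node-ip"] -> "192.168.126.11"
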
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804222 4664 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804233 4664 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804244 4664 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804257 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804267 4664 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804275 4664 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804284 4664 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804294 4664 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804302 4664 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804311 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804322 4664 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804334 4664 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804343 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804351 4664 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804360 4664 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804369 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804379 4664 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804387 4664 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804395 4664 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804403 4664 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804412 4664 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804424 4664 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804432 4664 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804441 4664 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804449 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804457 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804466 4664 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804477 4664 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804487 4664 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804496 4664 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804505 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804513 4664 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804522 4664 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804530 4664 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804539 4664 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804550 4664 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804561 4664 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804572 4664 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804583 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804592 4664 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804601 4664 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804610 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804619 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804629 4664 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804637 4664 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804646 4664 feature_gate.go:330] unrecognized feature gate: Example
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804654 4664 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804663 4664 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804671 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804680 4664 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804688 4664 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804696 4664 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804705 4664 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804720 4664 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804729 4664 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804738 4664 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804746 4664 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804754 4664 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804763 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804772 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804780 4664 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804788 4664 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804822 4664 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804831 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804840 4664 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804848 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804857 4664 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804868 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.804877 4664 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.805227 4664 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.805238 4664 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.805252 4664 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.821187 4664 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.821250 4664 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821558 4664 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821584 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821596 4664 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821604 4664 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821615 4664 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821624 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821633 4664 feature_gate.go:330] unrecognized feature gate: NewOLM Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821641 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821649 4664 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821658 4664 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821667 4664 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821683 4664 feature_gate.go:330] unrecognized feature gate: PinnedImages Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821695 4664 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821706 4664 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821715 4664 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821724 4664 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821732 4664 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821740 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821748 4664 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821756 4664 feature_gate.go:330] unrecognized feature gate: OVNObservability Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821765 4664 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821773 4664 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821781 4664 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821789 4664 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821866 4664 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821907 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821920 4664 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Oct 13 06:46:32 crc 
kubenswrapper[4664]: W1013 06:46:32.821931 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821944 4664 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821954 4664 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821967 4664 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821976 4664 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821987 4664 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.821999 4664 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822011 4664 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822021 4664 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822041 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822051 4664 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822060 4664 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822069 4664 feature_gate.go:330] unrecognized feature gate: Example Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822078 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822091 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822101 4664 feature_gate.go:330] unrecognized feature gate: PlatformOperators Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822111 4664 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822120 4664 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822129 4664 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822138 4664 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822147 4664 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822156 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822164 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822180 4664 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822189 4664 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822199 4664 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822208 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822217 4664 feature_gate.go:330] unrecognized feature gate: SignatureStores Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822230 4664 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822257 4664 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822269 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822279 4664 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822288 4664 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822297 4664 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822306 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822321 4664 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822330 4664 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822339 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822348 4664 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822356 4664 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822368 4664 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822380 4664 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822390 4664 feature_gate.go:330] unrecognized feature gate: GatewayAPI Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.822400 4664 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.822416 4664 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823203 4664 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823225 4664 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823235 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823243 4664 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823253 4664 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823262 4664 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823270 4664 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823278 4664 feature_gate.go:330] unrecognized feature gate: PlatformOperators Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823286 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823295 4664 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823304 4664 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823312 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823320 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823328 4664 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823337 4664 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823346 4664 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823355 4664 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823364 4664 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823377 4664 feature_gate.go:351] Setting deprecated feature gate 
KMSv1=true. It will be removed in a future release. Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823389 4664 feature_gate.go:330] unrecognized feature gate: GatewayAPI Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823397 4664 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823405 4664 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823413 4664 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823424 4664 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823434 4664 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823455 4664 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823464 4664 feature_gate.go:330] unrecognized feature gate: PinnedImages Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823472 4664 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823484 4664 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823494 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823503 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823513 4664 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823522 4664 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823530 4664 feature_gate.go:330] unrecognized feature gate: SignatureStores Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823538 4664 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823546 4664 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823555 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823563 4664 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823571 4664 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823579 4664 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823587 4664 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823594 4664 feature_gate.go:330] unrecognized feature gate: OVNObservability Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823603 4664 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823612 4664 feature_gate.go:330] unrecognized 
feature gate: MachineAPIMigration Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823620 4664 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823628 4664 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823636 4664 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823643 4664 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823651 4664 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823659 4664 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823666 4664 feature_gate.go:330] unrecognized feature gate: NewOLM Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823674 4664 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823682 4664 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823690 4664 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823698 4664 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823706 4664 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823714 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823723 4664 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823731 4664 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823739 4664 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823747 4664 feature_gate.go:330] unrecognized feature gate: InsightsConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823755 4664 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823763 4664 feature_gate.go:330] unrecognized feature gate: Example Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823770 4664 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823782 4664 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823790 4664 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823834 4664 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823845 4664 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823856 4664 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823868 4664 feature_gate.go:353] 
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823868 4664 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.823878 4664 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.823893 4664 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.825144 4664 server.go:940] "Client rotation is on, will bootstrap in background"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.836260 4664 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.836413 4664 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.838620 4664 server.go:997] "Starting client certificate rotation"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.838674 4664 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.838897 4664 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-17 03:04:28.844404988 +0000 UTC
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.839003 4664 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1556h17m56.005408219s for next certificate rotation
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.869308 4664 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.873341 4664 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.898301 4664 log.go:25] "Validated CRI v1 runtime API"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.936308 4664 log.go:25] "Validated CRI v1 image API"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.939359 4664 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.947321 4664 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-10-13-06-40-50-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.947368 4664 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.959640 4664 manager.go:217] Machine: {Timestamp:2025-10-13 06:46:32.956925518 +0000 UTC m=+0.644370730 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2799998 MemoryCapacity:25199476736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:56030f0e-24c6-4539-80c0-32dccf756401 BootID:91d5f35e-3847-46d0-ad62-97041d9b1127 Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599738368 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:12:41:5c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:12:41:5c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d0:db:11 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:89:d1:63 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:df:57:96 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a8:8f:6b Speed:-1 Mtu:1496} {Name:eth10 MacAddress:66:be:4a:f6:50:c3 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:26:5c:53:c4:fc:f0 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199476736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.959879 4664 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.960017 4664 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.962509 4664 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.962684 4664 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.962722 4664 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.963184 4664 topology_manager.go:138] "Creating topology manager with none policy"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.963199 4664 container_manager_linux.go:303] "Creating device plugin manager"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.963710 4664 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.963746 4664 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.964043 4664 state_mem.go:36] "Initialized new in-memory state store"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.964127 4664 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.967360 4664 kubelet.go:418] "Attempting to sync node with API server"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.967387 4664 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.967406 4664 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.967420 4664 kubelet.go:324] "Adding apiserver pod source"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.967436 4664 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.972566 4664 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.973916 4664 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.975245 4664 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977534 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977579 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977591 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977602 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977621 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977633 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977642 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977659 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977671 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977685 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977701 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.977711 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.978345 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.978492 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.978355 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.978627 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.979380 4664 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.979988 4664 server.go:1280] "Started kubelet"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.981292 4664 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.981290 4664 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.981825 4664 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.981858 4664 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982235 4664 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982353 4664 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982387 4664 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 23:43:57.660736701 +0000 UTC
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982459 4664 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1600h57m24.678282515s for next certificate rotation
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982464 4664 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982443 4664 volume_manager.go:287] "The desired_state_of_world populator starts"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.982587 4664 volume_manager.go:289] "Starting Kubelet Volume Manager"
Oct 13 06:46:32 crc systemd[1]: Started Kubernetes Kubelet.
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.983391 4664 server.go:460] "Adding debug handlers to kubelet server"
Oct 13 06:46:32 crc kubenswrapper[4664]: W1013 06:46:32.983374 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.983469 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.984485 4664 factory.go:55] Registering systemd factory
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.984509 4664 factory.go:221] Registration of the systemd container factory successfully
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.984668 4664 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.985170 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="200ms"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.986107 4664 factory.go:153] Registering CRI-O factory
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.986148 4664 factory.go:221] Registration of the crio container factory successfully
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.986530 4664 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.986727 4664 factory.go:103] Registering Raw factory
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.987028 4664 manager.go:1196] Started watching for new ooms in manager
Oct 13 06:46:32 crc kubenswrapper[4664]: E1013 06:46:32.987736 4664 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.223:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186dfa13f431acdd default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-13 06:46:32.979950813 +0000 UTC m=+0.667396005,LastTimestamp:2025-10-13 06:46:32.979950813 +0000 UTC m=+0.667396005,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.990956 4664 manager.go:319] Starting recovery of all containers
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.998857 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.998974 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999001 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999024 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999045 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999066 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999087 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999107 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999133 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999158 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999179 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999200 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999223 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999247 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999268 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999289 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999314 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999335 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999359 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Oct 13 06:46:32 crc kubenswrapper[4664]: I1013 06:46:32.999382 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000058 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000120 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000152 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000188 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000213 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000244 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000277 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000316 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000342 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000372 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000398 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000426 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000451 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000473 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000505 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000529 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000557 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000580 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000603 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000634 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000657 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000686 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000709 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000732 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000761 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000786 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000842 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000865 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000888 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000919 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000942 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.000963 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001004 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001036 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001068 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001100 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001133 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.001156 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007067 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007142 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007166 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007193 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007210 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007228 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007253 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007271 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007312 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007328 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007349 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007368 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007390 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007409 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007425 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007443 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007459 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007482 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007498 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007516 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007538 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007555 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007577 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007593 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007611 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007631 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007647 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007665 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007684 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007699 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007719 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007736 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007752 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007773 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007811 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007852 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007869 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007885 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007906 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007922 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007943 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007959 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.007977 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008000 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008015 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008045 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008067 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008083 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008105 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008128 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008148 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008171 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008187 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008208 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008226 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008243 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008263 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008279 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008294 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008314 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.008330 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010396 4664 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010435 4664 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010462 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010477 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010497 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010514 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010529 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010548 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010562 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010580 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010599 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010616 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010634 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010651 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010671 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010687 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010731 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.010971 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011025 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011046 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011064 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011084 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011104 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011121 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011163 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011182 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011203 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011268 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011285 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011302 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011324 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011340 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011359 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011374 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011391 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011409 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011427 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011446 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011463 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011478 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011497 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011512 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011533 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011549 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011567 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011586 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011603 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011620 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011637 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011652 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011671 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011686 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011702 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011719 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011735 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011752 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011769 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011784 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011830 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011847 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011866 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011881 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011897 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011914 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011931 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011948 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011968 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.011983 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012002 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012017 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012035 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012049 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012065 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012083 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012100 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012119 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012195 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012210 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012231 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012246 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012262 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012280 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012297 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012316 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012334 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012349 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012368 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012383 4664 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012397 4664 reconstruct.go:97] "Volume reconstruction finished" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.012407 4664 reconciler.go:26] "Reconciler: start to sync state" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.024230 4664 manager.go:324] Recovery completed Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.038220 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040173 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040231 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040246 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040891 4664 cpu_manager.go:225] "Starting CPU manager" policy="none"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040915 4664 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.040944 4664 state_mem.go:36] "Initialized new in-memory state store"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.043153 4664 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.045476 4664 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.045611 4664 status_manager.go:217] "Starting to sync pod status with apiserver"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.045650 4664 kubelet.go:2335] "Starting kubelet main sync loop"
Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.045744 4664 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Oct 13 06:46:33 crc kubenswrapper[4664]: W1013 06:46:33.049131 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.049306 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.062252 4664 policy_none.go:49] "None policy: Start"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.064863 4664 memory_manager.go:170] "Starting memorymanager" policy="None"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.064919 4664 state_mem.go:35] "Initializing new in-memory state store"
Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.085126 4664 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.114088 4664 manager.go:334] "Starting Device Plugin manager"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.114148 4664 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.114162 4664 server.go:79] "Starting device plugin registration server"
Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.114764 4664 eviction_manager.go:189] "Eviction manager: starting control loop"
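
The reflector failures above are the first symptom of this boot: every call to the API server at https://api-int.crc.testing:6443 fails with "connection refused", because at this point the kube-apiserver static pod has not started yet. When debugging such a loop from the node, a minimal TCP probe of the same endpoint (hostname and port taken from the errors above; run wherever the kubelet's resolver works) is often the quickest first check:

import socket

# Endpoint taken from the "dial tcp ... connection refused" errors above.
HOST, PORT = "api-int.crc.testing", 6443

try:
    with socket.create_connection((HOST, PORT), timeout=3):
        print(f"{HOST}:{PORT} is accepting TCP connections")
except OSError as exc:  # connection refused, timeout, or DNS failure
    print(f"{HOST}:{PORT} unreachable: {exc}")
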
"Initializing container log rotate workers" workers=1 monitorPeriod="10s" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.115379 4664 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.115458 4664 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.115470 4664 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.126364 4664 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.146717 4664 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.146865 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.148517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.148597 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.148613 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.148928 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.149262 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.149327 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.150671 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.150735 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.150748 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.150940 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.151094 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.151110 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.151140 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.151175 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.151199 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153016 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153107 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153124 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153034 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153183 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153418 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.153897 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.154049 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155459 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155488 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155510 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155523 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155533 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.155775 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.156191 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.156288 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.157000 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.157037 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.157052 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.157524 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.157578 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158338 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158365 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158372 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.158381 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.186229 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="400ms" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215626 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215723 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215733 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215764 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215829 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215866 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.215899 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.216000 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.216383 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.216447 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.216553 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.216615 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.217022 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.217084 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.217123 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.217271 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.218580 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.218659 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.218690 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.218736 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.219377 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319004 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319113 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319150 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319184 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319217 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319247 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319278 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319309 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319342 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319370 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319401 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319429 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319459 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319487 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.319516 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320185 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320243 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320253 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320311 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320450 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320195 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320534 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320654 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320656 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320683 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320706 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" 
(UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320713 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320732 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320738 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.320762 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.420258 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.422776 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.422836 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.422850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.422885 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.423244 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.489507 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.521965 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.526774 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: W1013 06:46:33.549387 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-b887f7bcb0542ee7aed9c020bfdd3ba0ac1aa5d194fd4cca62f087aac0bc1d92 WatchSource:0}: Error finding container b887f7bcb0542ee7aed9c020bfdd3ba0ac1aa5d194fd4cca62f087aac0bc1d92: Status 404 returned error can't find the container with id b887f7bcb0542ee7aed9c020bfdd3ba0ac1aa5d194fd4cca62f087aac0bc1d92 Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.551789 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.557096 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:33 crc kubenswrapper[4664]: W1013 06:46:33.564436 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-89a4911e87ad4669ddc24c340f79a69b3047016617b65959d2b1857da1c8e1ce WatchSource:0}: Error finding container 89a4911e87ad4669ddc24c340f79a69b3047016617b65959d2b1857da1c8e1ce: Status 404 returned error can't find the container with id 89a4911e87ad4669ddc24c340f79a69b3047016617b65959d2b1857da1c8e1ce Oct 13 06:46:33 crc kubenswrapper[4664]: W1013 06:46:33.570549 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-e70f7b75cb2981edc27ef414a59de64bfeb5026b65fb761e723f1d78e6c7eedf WatchSource:0}: Error finding container e70f7b75cb2981edc27ef414a59de64bfeb5026b65fb761e723f1d78e6c7eedf: Status 404 returned error can't find the container with id e70f7b75cb2981edc27ef414a59de64bfeb5026b65fb761e723f1d78e6c7eedf Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.587517 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="800ms" Oct 13 06:46:33 crc kubenswrapper[4664]: W1013 06:46:33.599228 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-62ca408428e4e6dec1cc2d798c6995b54d7022a38d1bc1052313092f2f216501 WatchSource:0}: Error finding container 62ca408428e4e6dec1cc2d798c6995b54d7022a38d1bc1052313092f2f216501: Status 404 returned error can't find the container with id 62ca408428e4e6dec1cc2d798c6995b54d7022a38d1bc1052313092f2f216501 Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.824217 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.826413 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.826474 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.826492 4664 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.826533 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: E1013 06:46:33.826891 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Oct 13 06:46:33 crc kubenswrapper[4664]: I1013 06:46:33.983663 4664 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:34 crc kubenswrapper[4664]: W1013 06:46:34.006540 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.006665 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.051209 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f496367831668ba14f5afd8f6ac356a7c582d42e992300549615d36d05bc80fc"} Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.052691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"62ca408428e4e6dec1cc2d798c6995b54d7022a38d1bc1052313092f2f216501"} Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.053696 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e70f7b75cb2981edc27ef414a59de64bfeb5026b65fb761e723f1d78e6c7eedf"} Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.054827 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"89a4911e87ad4669ddc24c340f79a69b3047016617b65959d2b1857da1c8e1ce"} Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.055644 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b887f7bcb0542ee7aed9c020bfdd3ba0ac1aa5d194fd4cca62f087aac0bc1d92"} Oct 13 06:46:34 crc kubenswrapper[4664]: W1013 06:46:34.156348 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.156450 4664 
reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Oct 13 06:46:34 crc kubenswrapper[4664]: W1013 06:46:34.295632 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.295880 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.388900 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="1.6s" Oct 13 06:46:34 crc kubenswrapper[4664]: W1013 06:46:34.459185 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.459272 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.627371 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.629239 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.629281 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.629293 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.629320 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:34 crc kubenswrapper[4664]: E1013 06:46:34.629757 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Oct 13 06:46:34 crc kubenswrapper[4664]: I1013 06:46:34.984262 4664 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 
06:46:35.062185 4664 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2" exitCode=0 Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.062325 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.062408 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064244 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064255 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064407 4664 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082" exitCode=0 Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064485 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.064584 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.066217 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.066237 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.066245 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.070365 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.070409 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.070423 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.070434 4664 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.070533 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.072233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.072308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.072338 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.073238 4664 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99" exitCode=0 Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.073338 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.073380 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.074443 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.074487 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.074500 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.079554 4664 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3" exitCode=0 Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.079595 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3"} Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.079714 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.080436 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.080473 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.080484 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.086241 4664 kubelet_node_status.go:401] "Setting node annotation to enable 
volume controller attach/detach" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.087250 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.087390 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.087512 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:35 crc kubenswrapper[4664]: I1013 06:46:35.984195 4664 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:35 crc kubenswrapper[4664]: E1013 06:46:35.989943 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="3.2s" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.084299 4664 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633" exitCode=0 Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.084397 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.084499 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.085704 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.085733 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.085743 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.087876 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.087904 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.088968 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.089004 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.089018 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.098878 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.098912 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.098926 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.099049 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.100638 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.100661 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.100674 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.105908 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.106094 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.106246 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.106272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0"} Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.107040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.107122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.107189 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.229936 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.231370 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 
06:46:36.231419 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.231430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.231452 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:36 crc kubenswrapper[4664]: E1013 06:46:36.232121 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Oct 13 06:46:36 crc kubenswrapper[4664]: W1013 06:46:36.569116 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Oct 13 06:46:36 crc kubenswrapper[4664]: E1013 06:46:36.569239 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Oct 13 06:46:36 crc kubenswrapper[4664]: I1013 06:46:36.793039 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.118813 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402"} Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.118871 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205"} Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.118847 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.120412 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.120453 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.120465 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.122864 4664 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5" exitCode=0 Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.122971 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.123037 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:37 crc 
kubenswrapper[4664]: I1013 06:46:37.123377 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5"} Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.123480 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.123765 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.123892 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.124975 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125016 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125025 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125048 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125060 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125186 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125389 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125491 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125567 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:37 crc kubenswrapper[4664]: I1013 06:46:37.125623 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128539 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659"} Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be"} Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128603 4664 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c"} Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128613 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128619 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128616 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e"} Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.128726 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129719 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129746 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129758 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129859 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129938 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:38 crc kubenswrapper[4664]: I1013 06:46:38.129951 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.036660 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.037002 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.040032 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.040082 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.040094 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.144435 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.144405 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9"} Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.144507 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 
06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.146722 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.146777 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.146824 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.148088 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.148180 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.148202 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.433234 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.435258 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.435309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.435342 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.435384 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.793757 4664 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 06:46:39 crc kubenswrapper[4664]: I1013 06:46:39.793939 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.101717 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.147826 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.148358 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.149194 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.149301 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:40 crc kubenswrapper[4664]: 
I1013 06:46:40.149321 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.150460 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.150502 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:40 crc kubenswrapper[4664]: I1013 06:46:40.150514 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.210521 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.210885 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.212581 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.212640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.212664 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.893850 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.894173 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.895561 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.895736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:41 crc kubenswrapper[4664]: I1013 06:46:41.895868 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.184536 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.185373 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.187231 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.187288 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.187306 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.333331 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.333705 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.336570 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.336634 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.336649 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:42 crc kubenswrapper[4664]: I1013 06:46:42.994565 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:43 crc kubenswrapper[4664]: I1013 06:46:43.003107 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:43 crc kubenswrapper[4664]: E1013 06:46:43.126485 4664 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 13 06:46:43 crc kubenswrapper[4664]: I1013 06:46:43.157560 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:43 crc kubenswrapper[4664]: I1013 06:46:43.160989 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:43 crc kubenswrapper[4664]: I1013 06:46:43.161107 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:43 crc kubenswrapper[4664]: I1013 06:46:43.161141 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:44 crc kubenswrapper[4664]: I1013 06:46:44.160762 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:44 crc kubenswrapper[4664]: I1013 06:46:44.162156 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:44 crc kubenswrapper[4664]: I1013 06:46:44.162224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:44 crc kubenswrapper[4664]: I1013 06:46:44.162235 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:44 crc kubenswrapper[4664]: I1013 06:46:44.165509 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:45 crc kubenswrapper[4664]: I1013 06:46:45.164237 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:45 crc kubenswrapper[4664]: I1013 06:46:45.166618 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:45 crc kubenswrapper[4664]: I1013 06:46:45.166799 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:45 crc kubenswrapper[4664]: I1013 06:46:45.166956 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:46 crc kubenswrapper[4664]: I1013 06:46:46.482442 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc 
container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Oct 13 06:46:46 crc kubenswrapper[4664]: I1013 06:46:46.482514 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Oct 13 06:46:46 crc kubenswrapper[4664]: W1013 06:46:46.667845 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Oct 13 06:46:46 crc kubenswrapper[4664]: I1013 06:46:46.667930 4664 trace.go:236] Trace[666381762]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Oct-2025 06:46:36.666) (total time: 10001ms): Oct 13 06:46:46 crc kubenswrapper[4664]: Trace[666381762]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:46:46.667) Oct 13 06:46:46 crc kubenswrapper[4664]: Trace[666381762]: [10.001123632s] [10.001123632s] END Oct 13 06:46:46 crc kubenswrapper[4664]: E1013 06:46:46.667953 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Oct 13 06:46:46 crc kubenswrapper[4664]: W1013 06:46:46.878713 4664 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Oct 13 06:46:46 crc kubenswrapper[4664]: I1013 06:46:46.878839 4664 trace.go:236] Trace[732566683]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Oct-2025 06:46:36.876) (total time: 10002ms): Oct 13 06:46:46 crc kubenswrapper[4664]: Trace[732566683]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (06:46:46.878) Oct 13 06:46:46 crc kubenswrapper[4664]: Trace[732566683]: [10.00270479s] [10.00270479s] END Oct 13 06:46:46 crc kubenswrapper[4664]: E1013 06:46:46.878863 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Oct 13 06:46:46 crc kubenswrapper[4664]: I1013 06:46:46.984730 4664 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Oct 13 06:46:47 crc kubenswrapper[4664]: W1013 06:46:47.419217 4664 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Oct 13 06:46:47 crc kubenswrapper[4664]: I1013 06:46:47.419386 4664 trace.go:236] Trace[693498020]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Oct-2025 06:46:37.417) (total time: 10001ms):
Oct 13 06:46:47 crc kubenswrapper[4664]: Trace[693498020]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:46:47.419)
Oct 13 06:46:47 crc kubenswrapper[4664]: Trace[693498020]: [10.001523176s] [10.001523176s] END
Oct 13 06:46:47 crc kubenswrapper[4664]: E1013 06:46:47.419429 4664 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Oct 13 06:46:47 crc kubenswrapper[4664]: I1013 06:46:47.519596 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Oct 13 06:46:47 crc kubenswrapper[4664]: I1013 06:46:47.520226 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 13 06:46:47 crc kubenswrapper[4664]: I1013 06:46:47.526915 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Oct 13 06:46:47 crc kubenswrapper[4664]: I1013 06:46:47.527002 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 13 06:46:49 crc kubenswrapper[4664]: I1013 06:46:49.794826 4664 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 06:46:49 crc kubenswrapper[4664]: I1013 06:46:49.795362 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.216057 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.216371 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.217883 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.217988 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.218051 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.225015 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.313252 4664 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Oct 13 06:46:51 crc kubenswrapper[4664]: I1013 06:46:51.809710 4664 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.182845 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.183587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.183688 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.183759 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.369685 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.369971 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.371620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.371684 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.371703 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.388318 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Oct 13 06:46:52 crc kubenswrapper[4664]: E1013 06:46:52.500668 4664 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.505286 4664 trace.go:236] Trace[1645733884]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Oct-2025 06:46:42.103) (total time: 10402ms):
Oct 13 06:46:52 crc kubenswrapper[4664]: Trace[1645733884]: ---"Objects listed" error: 10402ms (06:46:52.505)
Oct 13 06:46:52 crc kubenswrapper[4664]: Trace[1645733884]: [10.402203043s] [10.402203043s] END
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.505325 4664 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.505626 4664 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Oct 13 06:46:52 crc kubenswrapper[4664]: E1013 06:46:52.506829 4664 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.618304 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37086->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.618378 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37086->192.168.126.11:17697: read: connection reset by peer"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.618848 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37092->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.619018 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37092->192.168.126.11:17697: read: connection reset by peer"
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.619576 4664 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Oct 13 06:46:52 crc kubenswrapper[4664]: I1013 06:46:52.619721 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.030204 4664 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.187509 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.189506 4664 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402" exitCode=255
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.190304 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402"}
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.356686 4664 scope.go:117] "RemoveContainer" containerID="7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.980378 4664 apiserver.go:52] "Watching apiserver"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.989997 4664 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.990645 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-machine-config-operator/machine-config-daemon-hkzpl","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-ovn-kubernetes/ovnkube-node-mjr5r","openshift-dns/node-resolver-96lj2","openshift-multus/multus-bg4kt","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-multus/multus-additional-cni-plugins-xh2nz"]
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.991105 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.991189 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.991237 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:46:53 crc kubenswrapper[4664]: E1013 06:46:53.991291 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:46:53 crc kubenswrapper[4664]: E1013 06:46:53.991343 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.992348 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.992573 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.994109 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:46:53 crc kubenswrapper[4664]: E1013 06:46:53.994184 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.995097 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.995518 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-96lj2"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.995836 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bg4kt"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.996121 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl"
Oct 13 06:46:53 crc kubenswrapper[4664]: I1013 06:46:53.996172 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xh2nz"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.026487 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.028576 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.029791 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.040553 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.041835 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.041906 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.041922 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.041835 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.042132 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.042255 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.042680 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.042765 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.042991 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.043199 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.045571 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.045901 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.048705 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.052025 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.052659 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.052713 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.055902 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059281 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059300 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059352 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059385 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059302 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059308 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059546 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.059635 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.060710 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.061017 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.075816 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.083983 4664 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.094658 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.109093 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115151 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115190 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115215 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115233 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115255 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115475 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115518 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115658 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115753 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115861 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115970 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116059 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116139 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116231 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116309 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116455 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116564 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.115917 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116649 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116763 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116803 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116824 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116843 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116848 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116864 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116882 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116900 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116917 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116935 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116979 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116997 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117016 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117035 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117052 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117069 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117087 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117104 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117162 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117182 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117201 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117218 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117235 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117252 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117270 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117285 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117300 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117315 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117330 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117349 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117364 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117379 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117396 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117413 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117428 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117475 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117498 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117520 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117544 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117565 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117591 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117617 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117642 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117664 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117688 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117712 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117734 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117871 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117903 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117928 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117980 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118005 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118029 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118054 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118078 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118104 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118129 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118153 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118175 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118197 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118222 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118248 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118271 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118292 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118314 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118336 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118359 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118380 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118403 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118427 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118456 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118483 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118507 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118530 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118553 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118590 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117011 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117028 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118645 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116168 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116316 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116224 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116347 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116445 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116528 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116582 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116622 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117185 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117239 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117367 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117651 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117843 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117973 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.117974 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118179 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118235 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118241 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118376 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118444 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118611 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118705 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118818 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.119038 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.119283 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.119405 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.119500 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.119716 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.120220 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.116067 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.120915 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.121298 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.121371 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.121656 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.122502 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125005 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125082 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125241 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.125350 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:46:54.625327613 +0000 UTC m=+22.312772805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125534 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125401 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125898 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.125953 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126028 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126116 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126314 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.118615 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126377 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126398 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126429 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126443 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126455 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126480 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126505 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126528 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126551 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126573 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126597 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126619 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126642 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126663 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod 
\"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126683 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126704 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126738 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126758 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126779 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126819 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126842 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126868 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126893 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126918 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" 
(UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126944 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126970 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126997 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127019 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127042 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127065 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127087 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127112 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127136 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127159 4664 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127183 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127207 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127230 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127256 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127282 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127305 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127327 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127350 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127377 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 
06:46:54.127401 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127425 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127454 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127477 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127505 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127530 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127558 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127591 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127618 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127657 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 13 
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127685 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127711 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127735 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127762 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127787 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127854 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127876 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127895 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127936 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127958 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127987 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128012 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128036 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128061 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128088 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128113 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128135 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128158 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128182 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128205 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128230 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128255 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128280 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128307 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128335 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128362 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128390 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128415 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128473 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128501 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128528 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128552 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128571 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128595 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128613 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128629 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128645 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128663 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128680 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128699 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128719 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128741 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128766 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128788 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128828 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128851 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128877 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128902 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128922 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128940 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128958 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128974 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129039 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129062 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-netns\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129077 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-kubelet\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129094 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-cnibin\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129280 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129300 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
started for volume \"kube-api-access-85x8c\" (UniqueName: \"kubernetes.io/projected/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-kube-api-access-85x8c\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129618 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-socket-dir-parent\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129633 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129648 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129663 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129689 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhrws\" (UniqueName: \"kubernetes.io/projected/35504ef1-729c-4404-bd49-0d82bf23ccbb-kube-api-access-vhrws\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129713 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129732 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-multus-certs\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129752 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.129769 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129788 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137663 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137702 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-hosts-file\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137735 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rv6j\" (UniqueName: \"kubernetes.io/projected/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-kube-api-access-6rv6j\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137759 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137779 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/35504ef1-729c-4404-bd49-0d82bf23ccbb-rootfs\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137810 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-multus\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137828 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units\") pod \"ovnkube-node-mjr5r\" (UID: 
\"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137845 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137862 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137879 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-k8s-cni-cncf-io\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137903 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137923 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cnibin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-bin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137956 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-hostroot\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137975 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137993 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138008 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138027 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnscf\" (UniqueName: \"kubernetes.io/projected/a1406c08-b6a4-404d-9b44-05ee214a555d-kube-api-access-wnscf\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138043 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-system-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138062 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-os-release\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138080 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cni-binary-copy\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138106 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138121 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-etc-kubernetes\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138138 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138156 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-conf-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138173 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138197 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138221 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138244 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138266 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-daemon-config\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138298 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35504ef1-729c-4404-bd49-0d82bf23ccbb-mcd-auth-proxy-config\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138325 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138344 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138363 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138383 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138404 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138427 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138465 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/35504ef1-729c-4404-bd49-0d82bf23ccbb-proxy-tls\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138480 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-system-cni-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138496 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-binary-copy\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 
06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138511 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knvjm\" (UniqueName: \"kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138530 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-os-release\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138545 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138561 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138584 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138608 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138694 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138708 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138719 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138731 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.138743 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138753 4664 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138763 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138773 4664 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138784 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138822 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138834 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138845 4664 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138855 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138867 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138878 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138888 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138899 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.138910 4664 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138920 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138931 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138940 4664 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138950 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138960 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138970 4664 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138980 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.138992 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139003 4664 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139013 4664 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139023 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139035 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath 
\"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139045 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139056 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139066 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139076 4664 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139087 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139097 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139110 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139119 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139128 4664 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139138 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139149 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139159 4664 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139169 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.139179 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139189 4664 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139200 4664 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139210 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139221 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139230 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139239 4664 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139248 4664 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139258 4664 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139268 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.139929 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.146070 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.148870 4664 swap_util.go:74] "error creating dir to test if tmpfs 
noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170237 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.126972 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127266 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127565 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127588 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171294 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127705 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.127843 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128059 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128138 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128169 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128252 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128407 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171380 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128361 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128482 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.128829 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129235 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129489 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.129676 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171469 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171462 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131106 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131296 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131323 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131532 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131666 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131923 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.131981 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.132050 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.132225 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.132773 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.133880 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.134249 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.134484 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137089 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137325 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137325 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137591 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.137819 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.140237 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.140271 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.140396 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.140485 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.140778 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.141671 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.142063 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.142473 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.142698 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.143271 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.143666 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.143729 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.202769 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:54.702732467 +0000 UTC m=+22.390177659 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.203219 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.203263 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.203278 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.203375 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:54.703350874 +0000 UTC m=+22.390796066 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.143787 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.205429 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.205781 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.206049 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.206082 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.206111 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.143885 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.143945 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.144096 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.144325 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.144552 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). 
InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.144550 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.144786 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.145008 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.145183 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.145412 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.145585 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.146006 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.146209 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.148173 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.148828 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.149282 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.149334 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.149462 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.149507 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.153723 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.154064 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.154300 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.154653 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.167974 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.168174 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.169941 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170403 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170488 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170568 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170612 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170662 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.170946 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171052 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171152 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171164 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171199 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.171227 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.173459 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.175183 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.175626 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.175871 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.181393 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.183850 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184092 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184167 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184357 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184512 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184660 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.206529 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-10-13 06:46:54.706507673 +0000 UTC m=+22.393952865 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184670 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184771 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184814 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.184868 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.185193 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.185299 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.185566 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.199707 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.185762 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186196 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186456 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186579 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186626 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186758 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186768 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186951 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.187068 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.193306 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.193685 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.193958 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.194087 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.194530 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.194787 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.195142 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.197248 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.197830 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.198107 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.198427 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.198630 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.207210 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.186670 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.198937 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.198952 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.199049 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.199297 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.199341 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.199765 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.207618 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.208454 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.215394 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.228775 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.230209 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.232266 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a"} Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.232305 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.232349 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.232367 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.232445 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:54.732419508 +0000 UTC m=+22.419864700 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.232325 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.236062 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241675 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-conf-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241708 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-daemon-config\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241731 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241793 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241834 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35504ef1-729c-4404-bd49-0d82bf23ccbb-mcd-auth-proxy-config\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241850 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241867 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241882 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241897 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-binary-copy\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " 
pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241913 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241929 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241946 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241963 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/35504ef1-729c-4404-bd49-0d82bf23ccbb-proxy-tls\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241977 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-system-cni-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.241996 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knvjm\" (UniqueName: \"kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242010 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242026 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-os-release\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242040 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns\") pod \"ovnkube-node-mjr5r\" (UID: 
\"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242056 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-kubelet\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242070 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242085 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-netns\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242099 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-cnibin\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242115 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242128 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242145 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85x8c\" (UniqueName: \"kubernetes.io/projected/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-kube-api-access-85x8c\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242161 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-socket-dir-parent\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242184 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.242198 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242213 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242228 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhrws\" (UniqueName: \"kubernetes.io/projected/35504ef1-729c-4404-bd49-0d82bf23ccbb-kube-api-access-vhrws\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242243 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-multus-certs\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242258 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242271 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242294 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-hosts-file\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242308 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rv6j\" (UniqueName: \"kubernetes.io/projected/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-kube-api-access-6rv6j\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242322 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242336 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/35504ef1-729c-4404-bd49-0d82bf23ccbb-rootfs\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242351 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-multus\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242365 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242379 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242394 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-k8s-cni-cncf-io\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242415 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cnibin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242429 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-bin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242445 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cni-binary-copy\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242462 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-hostroot\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242476 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch\") pod 
\"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242492 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242507 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnscf\" (UniqueName: \"kubernetes.io/projected/a1406c08-b6a4-404d-9b44-05ee214a555d-kube-api-access-wnscf\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242521 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-system-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242536 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-os-release\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242550 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242565 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-etc-kubernetes\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242605 4664 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242616 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242627 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242636 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc 
kubenswrapper[4664]: I1013 06:46:54.242645 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242654 4664 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242662 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242671 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242679 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242688 4664 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242698 4664 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242706 4664 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242715 4664 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242725 4664 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242733 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242741 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242749 4664 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 
06:46:54.242759 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242767 4664 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242775 4664 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.242784 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.243169 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-socket-dir-parent\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.243254 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-conf-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244167 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-daemon-config\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244225 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244258 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244696 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244823 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244880 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35504ef1-729c-4404-bd49-0d82bf23ccbb-mcd-auth-proxy-config\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244906 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244884 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.244935 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245192 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-multus-certs\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245231 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245255 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245294 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-hosts-file\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245417 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245471 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245504 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245954 4664 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245979 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.245990 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246003 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246013 4664 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246021 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246032 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246041 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246050 4664 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246060 4664 reconciler_common.go:293] "Volume detached for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246069 4664 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246078 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246087 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246117 4664 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246117 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-binary-copy\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246131 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246141 4664 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246152 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246161 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246170 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246178 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246203 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246212 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246222 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246230 4664 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246239 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246262 4664 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246271 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246279 4664 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246289 4664 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246297 4664 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246306 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246316 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246327 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246335 4664 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246344 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246352 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246361 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246370 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246379 4664 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246388 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246399 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246409 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246417 4664 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246426 4664 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246434 4664 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246442 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246451 4664 reconciler_common.go:293] "Volume detached for volume 
\"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246461 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246471 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246480 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246488 4664 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246497 4664 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246506 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246514 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246524 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246532 4664 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246541 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246550 4664 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246559 4664 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246567 4664 reconciler_common.go:293] "Volume detached 
for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246567 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a1406c08-b6a4-404d-9b44-05ee214a555d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246577 4664 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246603 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-etc-kubernetes\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246624 4664 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246642 4664 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246651 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246654 4664 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246677 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246687 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246689 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246697 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: 
\"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246708 4664 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246717 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246729 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246738 4664 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246748 4664 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246768 4664 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246778 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246787 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246835 4664 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246845 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246855 4664 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246863 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246872 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246882 4664 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246891 4664 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246900 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246908 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246916 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246924 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246933 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246942 4664 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246950 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246959 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246967 4664 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246976 4664 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246987 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: 
\"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.246995 4664 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247004 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247013 4664 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247021 4664 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247030 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247040 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247049 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247058 4664 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247067 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247076 4664 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247085 4664 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247094 4664 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 
06:46:54.247103 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247113 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247122 4664 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247130 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247477 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247518 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/35504ef1-729c-4404-bd49-0d82bf23ccbb-rootfs\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247542 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-multus\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.247669 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-kubelet\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248011 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248024 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248035 4664 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248045 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248054 4664 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248063 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248074 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248083 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248092 4664 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248121 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-k8s-cni-cncf-io\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248153 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cnibin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248176 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-var-lib-cni-bin\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248574 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-cni-binary-copy\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248608 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-hostroot\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.248631 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: 
\"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.249201 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.249387 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-system-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.249429 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-os-release\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.249962 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-cnibin\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.250072 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.250113 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-host-run-netns\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.250641 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.250690 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-system-cni-dir\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251085 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a1406c08-b6a4-404d-9b44-05ee214a555d-os-release\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251124 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251154 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251207 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251642 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.251890 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-multus-cni-dir\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.257374 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.257595 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/35504ef1-729c-4404-bd49-0d82bf23ccbb-proxy-tls\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.267673 4664 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.270515 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knvjm\" (UniqueName: \"kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm\") pod \"ovnkube-node-mjr5r\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.271724 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rv6j\" (UniqueName: \"kubernetes.io/projected/2f22066f-5783-48bc-85f8-0fbb2eed7e0b-kube-api-access-6rv6j\") pod \"multus-bg4kt\" (UID: \"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\") " pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.273965 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhrws\" (UniqueName: \"kubernetes.io/projected/35504ef1-729c-4404-bd49-0d82bf23ccbb-kube-api-access-vhrws\") pod \"machine-config-daemon-hkzpl\" (UID: \"35504ef1-729c-4404-bd49-0d82bf23ccbb\") " pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.274469 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85x8c\" (UniqueName: \"kubernetes.io/projected/62a87cfd-bdfc-4cf3-a081-b204fbe37d5a-kube-api-access-85x8c\") pod \"node-resolver-96lj2\" (UID: \"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\") " pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.274592 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnscf\" (UniqueName: \"kubernetes.io/projected/a1406c08-b6a4-404d-9b44-05ee214a555d-kube-api-access-wnscf\") pod \"multus-additional-cni-plugins-xh2nz\" (UID: \"a1406c08-b6a4-404d-9b44-05ee214a555d\") " pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.280123 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.291263 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13
T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.303140 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.310578 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.315351 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.316413 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 13 06:46:54 crc kubenswrapper[4664]: W1013 06:46:54.324867 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-aa77c2e9e5c44432c1c594b4500fd462fbc9d1db8910ae490582042fd27d54a3 WatchSource:0}: Error finding container aa77c2e9e5c44432c1c594b4500fd462fbc9d1db8910ae490582042fd27d54a3: Status 404 returned error can't find the container with id aa77c2e9e5c44432c1c594b4500fd462fbc9d1db8910ae490582042fd27d54a3 Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.325860 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.330555 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugi
ns\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c
2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.332866 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.343920 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-96lj2" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.344317 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.349477 4664 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.349578 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.349674 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bg4kt" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.356916 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.357554 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.362462 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.367974 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.380669 4664 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: W1013 06:46:54.393876 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74eb7029_982d_4294_bed0_63ffe7281479.slice/crio-f989b2b6944d3f4d874741060aa5479bbc22ba41ddedf3aeb9a5d8862358a48b WatchSource:0}: Error finding container f989b2b6944d3f4d874741060aa5479bbc22ba41ddedf3aeb9a5d8862358a48b: Status 404 returned error can't find the container with id f989b2b6944d3f4d874741060aa5479bbc22ba41ddedf3aeb9a5d8862358a48b Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.396433 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.407368 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: W1013 06:46:54.421244 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f22066f_5783_48bc_85f8_0fbb2eed7e0b.slice/crio-33c5aedcdf838effd1823203ad9cf6fd0e4577a9cdd64a0611cf6e99b2547282 WatchSource:0}: Error finding container 33c5aedcdf838effd1823203ad9cf6fd0e4577a9cdd64a0611cf6e99b2547282: Status 404 returned error can't find the container with id 33c5aedcdf838effd1823203ad9cf6fd0e4577a9cdd64a0611cf6e99b2547282 Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.423568 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.445545 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.458163 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: W1013 06:46:54.463206 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35504ef1_729c_4404_bd49_0d82bf23ccbb.slice/crio-028e7fcc709065bf6c16010fbc979df87d3f7b5c3177938a8c5b040855374bff WatchSource:0}: Error finding container 028e7fcc709065bf6c16010fbc979df87d3f7b5c3177938a8c5b040855374bff: Status 404 returned error can't find the container with id 028e7fcc709065bf6c16010fbc979df87d3f7b5c3177938a8c5b040855374bff Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.479699 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.492970 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.520196 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.534680 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.551697 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.582033 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.609584 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.626174 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.642579 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.653048 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.653382 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:46:55.653362794 +0000 UTC m=+23.340807986 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.755115 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.755153 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.755181 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:54 crc kubenswrapper[4664]: I1013 06:46:54.755208 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755286 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755339 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:55.755326403 +0000 UTC m=+23.442771595 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755740 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755760 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755772 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755870 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:55.755858388 +0000 UTC m=+23.443303580 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755868 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755938 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755951 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755953 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:55.75593581 +0000 UTC m=+23.443381002 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755959 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:54 crc kubenswrapper[4664]: E1013 06:46:54.755988 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:55.755980242 +0000 UTC m=+23.443425534 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.050721 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.083996 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.161696 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.163057 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.164123 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.165849 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.166972 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.169011 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Oct 13 
06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.170088 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.171667 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.172559 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.174146 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.175062 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.175996 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.177377 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.178458 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.179451 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.180677 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.181595 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.182700 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.184234 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.185263 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 
06:46:55.186591 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.187619 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.188354 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.189953 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.191592 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.192461 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.193209 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.194127 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.194680 4664 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.194875 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.197117 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.197718 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.198289 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.199963 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 
06:46:55.201232 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.202011 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.203391 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.204227 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.205525 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.206308 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.207684 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.209004 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.209697 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.210862 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.211635 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.213250 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.213926 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.214512 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.215495 4664 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.216303 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.217634 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.218919 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.236444 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerStarted","Data":"fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.236504 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerStarted","Data":"370f04bf19094f25d25ac71450baea037f319e3fab6d5965352de538af165873"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.239212 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.239244 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.239261 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"028e7fcc709065bf6c16010fbc979df87d3f7b5c3177938a8c5b040855374bff"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.247277 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"50717c8d4af67292990a5a10995018d6edec1b6c5fb18478eb151950736554d1"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.249566 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-96lj2" event={"ID":"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a","Type":"ContainerStarted","Data":"805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.249601 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-96lj2" event={"ID":"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a","Type":"ContainerStarted","Data":"4cd9228759936ee83cd41562103f0b31e60e2dad743f1f4a7cc5ce6024dc9a35"} Oct 13 06:46:55 crc 
kubenswrapper[4664]: I1013 06:46:55.252194 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerStarted","Data":"cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.252220 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerStarted","Data":"33c5aedcdf838effd1823203ad9cf6fd0e4577a9cdd64a0611cf6e99b2547282"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.255902 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801" exitCode=0 Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.255974 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.256002 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"f989b2b6944d3f4d874741060aa5479bbc22ba41ddedf3aeb9a5d8862358a48b"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.273885 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.282786 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.282885 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.282900 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fa49f9e6779c65210976ae6219bf282f012077e3de3c2bd68cf38ec2b9d8aec3"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.293222 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.293292 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"aa77c2e9e5c44432c1c594b4500fd462fbc9d1db8910ae490582042fd27d54a3"} Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.297875 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.316398 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.332154 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.344825 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.368423 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b
54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.389017 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.400343 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.414067 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.434527 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.452260 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.466347 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.479887 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.494948 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.511909 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.544120 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.563834 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.586039 
4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-1
3T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.596328 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.612609 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.627956 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\
\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.641378 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.659200 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.666358 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.666592 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:46:57.666569941 +0000 UTC m=+25.354015133 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.676078 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.688244 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.698290 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.767314 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.767722 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.767746 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:55 crc kubenswrapper[4664]: I1013 06:46:55.767773 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767480 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767886 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767898 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767909 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767929 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767833 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767943 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] 
Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.767997 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:57.767976744 +0000 UTC m=+25.455421936 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.768013 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:57.768007815 +0000 UTC m=+25.455453007 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.768051 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:57.768019465 +0000 UTC m=+25.455464657 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.768089 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:55 crc kubenswrapper[4664]: E1013 06:46:55.768249 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:46:57.768213991 +0000 UTC m=+25.455659353 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.046565 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.046606 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.046703 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:56 crc kubenswrapper[4664]: E1013 06:46:56.046772 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:46:56 crc kubenswrapper[4664]: E1013 06:46:56.046915 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:46:56 crc kubenswrapper[4664]: E1013 06:46:56.047088 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301745 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301875 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301898 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301919 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301938 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.301963 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.310252 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5" exitCode=0 Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.310319 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5"} Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.335168 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.355703 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.370202 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.384161 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.408113 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z 
is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.429163 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.448133 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.463230 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.476716 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.494067 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.511587 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.524184 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.537874 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.555777 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.569699 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc 
kubenswrapper[4664]: I1013 06:46:56.581303 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.596568 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.607075 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.623603 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.637754 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.652417 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.680629 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z 
is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.710824 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.728200 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.746515 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.763680 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.797924 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.802743 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.808974 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.819693 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.836910 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.861499 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.875782 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.891279 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.910261 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.920543 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.938879 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z 
is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.951814 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.965923 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc 
kubenswrapper[4664]: I1013 06:46:56.978635 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:56 crc kubenswrapper[4664]: I1013 06:46:56.993137 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:56Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.005341 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.034708 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.094368 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b
4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.121462 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.145637 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.172346 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.199495 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.221735 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.255382 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.280879 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc 
kubenswrapper[4664]: I1013 06:46:57.310378 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.316200 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788" exitCode=0 Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.316308 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788"} Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.318249 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac"} Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.338172 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.354544 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.370107 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.385012 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.406349 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.428414 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.444548 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.465777 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.479527 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.492938 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.518463 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z 
is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.541967 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.561353 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.579916 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.613482 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.658843 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.691089 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.691393 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:47:01.691350819 +0000 UTC m=+29.378796011 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.694197 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath
\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.735277 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-
13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:57Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.791926 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.791969 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.792007 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:57 crc kubenswrapper[4664]: I1013 06:46:57.792035 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792200 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792291 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792375 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:01.79234329 +0000 UTC m=+29.479788482 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792227 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792418 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792421 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:01.792391151 +0000 UTC m=+29.479836343 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792434 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792511 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:01.792489614 +0000 UTC m=+29.479935046 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792508 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792601 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792623 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:57 crc kubenswrapper[4664]: E1013 06:46:57.792789 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:01.792758952 +0000 UTC m=+29.480204164 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.046344 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.046475 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.046532 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.046580 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.046628 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.046671 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.328672 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485"} Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.332225 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c" exitCode=0 Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.332313 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c"} Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.351339 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.370327 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.384044 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.400637 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.431044 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.455043 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441e
cd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.477496 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.491699 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.505853 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.520181 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.539506 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.553709 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.571336 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.585722 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.907826 4664 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.909819 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.909854 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.909863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.909950 4664 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.927773 4664 kubelet_node_status.go:115] "Node was previously registered" node="crc" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.928484 4664 kubelet_node_status.go:79] "Successfully registered node" 
node="crc" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.930109 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.930238 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.930325 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.930410 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.930497 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:58Z","lastTransitionTime":"2025-10-13T06:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.953549 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.959156 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.959183 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.959191 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.959208 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.959216 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:58Z","lastTransitionTime":"2025-10-13T06:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.977371 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.982000 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.982170 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.982259 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.982341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:58 crc kubenswrapper[4664]: I1013 06:46:58.982414 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:58Z","lastTransitionTime":"2025-10-13T06:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:58 crc kubenswrapper[4664]: E1013 06:46:58.996340 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:58Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.001281 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.001343 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.001361 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.001386 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.001405 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: E1013 06:46:59.021301 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.025482 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.025584 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.025672 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.025753 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.025836 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: E1013 06:46:59.039763 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: E1013 06:46:59.039970 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.043488 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.044198 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.044273 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.044336 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.044392 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.146715 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.146816 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.146837 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.146864 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.146885 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.255401 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.255510 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.255541 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.255588 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.255632 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.341758 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e" exitCode=0 Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.341826 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.359586 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.359983 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.360098 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.360233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.360457 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.371542 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.387824 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.408703 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.424517 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.440488 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.463033 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.463089 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.463106 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.463130 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.463145 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.464174 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9
c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.488111 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e3
9aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.506996 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.522846 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.541267 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 
2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.560334 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.566959 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.567003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.567015 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.567032 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.567045 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.579617 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.593269 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.615237 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.635870 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-8fhpj"] Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.636291 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.646992 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.647016 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.647142 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.647345 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.670765 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.670838 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.670853 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.670872 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.670895 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.676581 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9
c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.705598 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e3
9aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.711487 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znc5s\" (UniqueName: \"kubernetes.io/projected/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-kube-api-access-znc5s\") pod \"node-ca-8fhpj\" (UID: 
\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.711533 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-host\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.711554 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-serviceca\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.723256 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.737072 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.756355 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.773890 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.773944 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.773956 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.773977 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.774183 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.779014 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.794548 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.811208 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.812756 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-host\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.812862 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-serviceca\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.812937 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-host\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.812989 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znc5s\" (UniqueName: \"kubernetes.io/projected/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-kube-api-access-znc5s\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.814344 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-serviceca\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.834154 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mou
ntPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.842043 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znc5s\" (UniqueName: \"kubernetes.io/projected/885fdacf-66f6-46d0-bc3f-f23f8edd8fce-kube-api-access-znc5s\") pod \"node-ca-8fhpj\" (UID: \"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\") " pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.849970 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.863603 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878210 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878267 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878304 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878320 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.878806 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.891325 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.906740 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.919936 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:46:59Z is after 2025-08-24T17:21:41Z" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.956966 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-8fhpj" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.981127 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.981164 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.981177 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.981199 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:46:59 crc kubenswrapper[4664]: I1013 06:46:59.981213 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:46:59Z","lastTransitionTime":"2025-10-13T06:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.046946 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.046946 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.047110 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:00 crc kubenswrapper[4664]: E1013 06:47:00.047323 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:00 crc kubenswrapper[4664]: E1013 06:47:00.047472 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:00 crc kubenswrapper[4664]: E1013 06:47:00.047575 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.084749 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.084832 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.084850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.084874 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.084892 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.188389 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.188430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.188447 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.188471 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.188489 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.291503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.291538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.291551 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.291572 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.291587 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.350021 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08" exitCode=0 Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.350085 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.357409 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.357760 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.359835 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8fhpj" event={"ID":"885fdacf-66f6-46d0-bc3f-f23f8edd8fce","Type":"ContainerStarted","Data":"702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.359875 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8fhpj" event={"ID":"885fdacf-66f6-46d0-bc3f-f23f8edd8fce","Type":"ContainerStarted","Data":"2c51eb9981ca1668786a1ebfd5e90a7444b1761112096e445f4f04b9f222ea7c"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.371537 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.386386 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.395311 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.395351 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.395360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.395378 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.395389 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.400383 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.402316 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.418561 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.438714 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.464009 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z 
is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.484606 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.505845 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.505908 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.505921 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.505946 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.505964 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.508680 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.522594 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.540324 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.557429 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.571725 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.586962 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.602080 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 
2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.610018 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.610050 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.610059 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.610076 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.610084 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.615187 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e
95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.632588 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.644455 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.663576 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\"
:0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.678375 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.691961 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.708664 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.713475 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.713511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.713527 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.713547 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.713560 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.725028 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.741471 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.764924 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa1
5196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.779408 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.797524 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.810352 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.816160 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.816211 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.816222 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.816239 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.816250 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.826303 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.839364 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.851269 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.918681 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.918714 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.918723 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.918737 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:00 crc kubenswrapper[4664]: I1013 06:47:00.918746 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:00Z","lastTransitionTime":"2025-10-13T06:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.020869 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.020897 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.020905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.020917 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.020926 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.123505 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.123579 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.123595 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.123624 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.123640 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.226741 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.226888 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.226914 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.226954 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.226977 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.330195 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.330248 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.330260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.330282 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.330296 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.367836 4664 generic.go:334] "Generic (PLEG): container finished" podID="a1406c08-b6a4-404d-9b44-05ee214a555d" containerID="d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39" exitCode=0 Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.367941 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerDied","Data":"d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.368005 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.368413 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.388534 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\
":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.405315 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.410677 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.434094 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.442926 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.443239 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.443304 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.443380 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.443435 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.472933 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.503905 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa1
5196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.532057 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.546501 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.546545 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.546562 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.546591 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.546611 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.551070 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.564119 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.586695 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.607082 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145
a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.619544 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.641192 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.649408 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.649461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.649475 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.649501 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.649516 4664 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.658786 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.683898 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.698534 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.716365 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.732198 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.732739 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.732959 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.732936926 +0000 UTC m=+37.420382138 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.747631 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.752982 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.753038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.753051 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.753071 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.753084 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.765460 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.788277 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.822590 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa1
5196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.834107 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.834155 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.834192 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.834223 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834392 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834478 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.834453282 +0000 UTC m=+37.521898484 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834487 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834526 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834685 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.834654058 +0000 UTC m=+37.522099280 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834495 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834723 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834748 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834555 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834841 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.834814822 +0000 UTC m=+37.522260024 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.834872 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:01 crc kubenswrapper[4664]: E1013 06:47:01.835003 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.834974768 +0000 UTC m=+37.522419970 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.856607 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.856681 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.856701 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.856736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.856757 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.865674 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.884327 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.897770 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.917870 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.936666 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 
2025-08-24T17:21:41Z"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.954012 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.959957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.960031 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.960044 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.960071 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.960087 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:01Z","lastTransitionTime":"2025-10-13T06:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:01 crc kubenswrapper[4664]: I1013 06:47:01.978306 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.001494 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:01Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.018066 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.046583 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.046635 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.046582 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:02 crc kubenswrapper[4664]: E1013 06:47:02.046771 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:02 crc kubenswrapper[4664]: E1013 06:47:02.046938 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:02 crc kubenswrapper[4664]: E1013 06:47:02.047036 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.062232 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.062265 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.062274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.062287 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.062295 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.165191 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.165247 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.165256 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.165277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.165287 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.273337 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.273767 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.273828 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.273855 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.273870 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375559 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375627 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375666 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375682 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.375984 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" event={"ID":"a1406c08-b6a4-404d-9b44-05ee214a555d","Type":"ContainerStarted","Data":"533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7"}
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.376109 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.404263 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b9
0092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.437776 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.464948 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.477901 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.477948 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.477957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.477979 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.477992 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.484121 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.501549 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.514209 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.534628 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa1
5196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.548707 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.566485 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.579540 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.579668 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.579711 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.579721 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.579735 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 
06:47:02.579743 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.596930 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.612484 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.624897 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.643205 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.655728 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:02Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.682413 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.682451 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.682461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.682494 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.682506 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.785790 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.785870 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.785887 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.785906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.785920 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.889207 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.889284 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.889303 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.889338 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.889361 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.991944 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.991994 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.992003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.992017 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:02 crc kubenswrapper[4664]: I1013 06:47:02.992027 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:02Z","lastTransitionTime":"2025-10-13T06:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.064447 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.084167 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.094371 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.094403 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.094411 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.094423 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.094431 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.101861 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.115426 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.142931 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa1
5196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.163307 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.177522 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.190438 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.196243 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.196317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.196332 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.196381 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.196399 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.206677 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.224921 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\"
:[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f
567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.240446 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"ph
ase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.256095 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.269499 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.289492 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.299924 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.299983 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.299995 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.300015 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.300064 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.303272 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.382020 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/0.log" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.385572 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd" exitCode=1 Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.385628 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 
06:47:03.386869 4664 scope.go:117] "RemoveContainer" containerID="13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.404111 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.404167 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.404192 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.404221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.404244 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.416001 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.432779 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.446438 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.469019 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.485551 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.507961 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.508019 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.508034 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.508060 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.508075 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.521676 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:03Z\\\",\\\"message\\\":\\\"amespace event handler 5\\\\nI1013 06:47:03.058394 5805 handler.go:208] Removed *v1.Node event handler 2\\\\nI1013 06:47:03.058404 5805 handler.go:208] Removed *v1.Node event handler 7\\\\nI1013 06:47:03.058413 5805 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1013 06:47:03.058422 5805 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1013 06:47:03.058675 5805 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:03.058870 5805 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:03.057653 5805 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1013 06:47:03.059245 5805 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:03.059304 5805 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:03.059417 5805 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.547667 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f
66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.570179 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.611309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.611349 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.611359 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.611375 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.611385 4664 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.633768 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.646472 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.662645 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.675692 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.714349 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.714389 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.714398 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.714421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 
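
The recurring "tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time ... is after ..." failure above is Go's standard certificate validity-window check rejecting the network-node-identity webhook's serving certificate (NotAfter 2025-08-24T17:21:41Z, well before the node's clock). A minimal stdlib sketch of the same comparison; the PEM file path is hypothetical:

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; stands in for the webhook's serving certificate.
        pemBytes, err := os.ReadFile("webhook-cert.pem")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(pemBytes)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }
        // crypto/x509 enforces NotBefore/NotAfter during verification; the log
        // shows the "current time ... is after ..." form of that failure.
        now := time.Now()
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("certificate has expired: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Println("certificate is not yet valid")
        default:
            fmt.Println("certificate is within its validity window")
        }
    }
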
06:47:03.714434 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.715236 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.755515 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.795692 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.816846 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.816889 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.816902 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.816923 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.816934 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.919507 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.919559 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.919572 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.919592 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:03 crc kubenswrapper[4664]: I1013 06:47:03.919604 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:03Z","lastTransitionTime":"2025-10-13T06:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.021907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.021953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.021964 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.021979 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.021991 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.046362 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:04 crc kubenswrapper[4664]: E1013 06:47:04.046549 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.047014 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:04 crc kubenswrapper[4664]: E1013 06:47:04.047124 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.047169 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:04 crc kubenswrapper[4664]: E1013 06:47:04.047240 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.125401 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.125438 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.125449 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.125464 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.125475 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
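
The "network is not ready ... no CNI configuration file in /etc/kubernetes/cni/net.d/" loop above persists until the network plugin writes a config into that directory. At its simplest the readiness question is just whether any CNI config file exists there yet; a sketch under that assumption (the real check also parses and validates the file, this only globs for candidates):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Directory path taken from the log records above; adjust elsewhere.
        dir := "/etc/kubernetes/cni/net.d"
        var found []string
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
            m, _ := filepath.Glob(filepath.Join(dir, pat))
            found = append(found, m...)
        }
        if len(found) == 0 {
            fmt.Fprintf(os.Stderr,
                "no CNI configuration file in %s. Has your network provider started?\n", dir)
            os.Exit(1)
        }
        fmt.Println("CNI config present:", found)
    }
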
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.227395 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.227431 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.227440 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.227453 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.227462 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.329805 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.329850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.329859 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.329871 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.329879 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.391648 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/1.log" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.392579 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/0.log" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.395052 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" exitCode=1 Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.395094 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.395146 4664 scope.go:117] "RemoveContainer" containerID="13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.395864 4664 scope.go:117] "RemoveContainer" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" Oct 13 06:47:04 crc kubenswrapper[4664]: E1013 06:47:04.396015 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.421344 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
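
"back-off 10s restarting failed container=ovnkube-controller" above is the first step of the kubelet's per-container crash back-off, which doubles after each failed restart up to a cap. The 10s base and 5m ceiling below match upstream kubelet defaults but are stated here as assumptions, not values read from this cluster's config:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Doubling back-off with a ceiling: 10s, 20s, 40s, ... capped at 5m.
        backoff := 10 * time.Second
        const maxBackoff = 5 * time.Minute
        for attempt := 1; attempt <= 7; attempt++ {
            fmt.Printf("crash %d -> CrashLoopBackOff wait %s\n", attempt, backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }
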
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.435620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.435672 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.435690 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.435719 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.435759 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.440245 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.456725 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.475637 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.491977 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.509304 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.538597 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.538640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.538658 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.538677 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.538690 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.539578 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13ec6917b422038ff727df606cfcc8c25de3dfa15196b09dccc976cb4c4cb7cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:03Z\\\",\\\"message\\\":\\\"amespace event handler 5\\\\nI1013 06:47:03.058394 5805 handler.go:208] Removed *v1.Node event handler 2\\\\nI1013 06:47:03.058404 5805 handler.go:208] Removed *v1.Node event handler 7\\\\nI1013 06:47:03.058413 5805 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1013 06:47:03.058422 5805 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1013 06:47:03.058675 5805 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:03.058870 5805 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:03.057653 5805 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1013 06:47:03.059245 5805 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:03.059304 5805 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:03.059417 5805 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.553497 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/h
ost/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.572162 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.586123 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.604854 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.621904 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.634849 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.642480 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.642512 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.642524 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.642542 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.642553 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.655160 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.672162 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:04Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.746292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.746409 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.746425 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.746446 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.746461 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.850778 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.850863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.850879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.850901 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.850916 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.955206 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.955263 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.955280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.955306 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:04 crc kubenswrapper[4664]: I1013 06:47:04.955326 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:04Z","lastTransitionTime":"2025-10-13T06:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.058239 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.058307 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.058330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.058359 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.058384 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.162053 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.162116 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.162137 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.162160 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.162177 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.264643 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.264717 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.264728 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.264745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.264754 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.366932 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.366990 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.367003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.367024 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.367036 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.399846 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/1.log" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.404085 4664 scope.go:117] "RemoveContainer" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" Oct 13 06:47:05 crc kubenswrapper[4664]: E1013 06:47:05.404325 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.441436 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.469843 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.469885 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.469894 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.469911 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.469922 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.471849 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.517690 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"r
eadOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.532843 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.545562 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.557918 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.572613 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.572658 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.572669 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.572685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.572695 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.577452 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.597059 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.619842 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.634783 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.647891 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.663962 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.675421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.675465 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.675476 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.675490 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.675501 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.677958 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.692591 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.704253 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:05Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.777360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.777466 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.777490 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 
13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.777522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.777545 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.879695 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.879735 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.879782 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.879828 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.879839 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.982908 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.982938 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.982946 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.982958 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:05 crc kubenswrapper[4664]: I1013 06:47:05.982967 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:05Z","lastTransitionTime":"2025-10-13T06:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.046212 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:06 crc kubenswrapper[4664]: E1013 06:47:06.046330 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.046632 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:06 crc kubenswrapper[4664]: E1013 06:47:06.046682 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.046714 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:06 crc kubenswrapper[4664]: E1013 06:47:06.046752 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.085011 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.085039 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.085046 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.085059 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.085068 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.187640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.187712 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.187734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.187756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.187773 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.290785 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.290960 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.290987 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.291014 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.291036 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.394330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.394407 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.394425 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.394451 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.394468 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.487246 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.497009 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.497185 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.497204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.497225 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.497242 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.502764 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-op
erator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.519544 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.532197 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.542356 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.553407 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.570104 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff872
9d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.588608 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.600068 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.600112 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc 
kubenswrapper[4664]: I1013 06:47:06.600125 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.600142 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.600153 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.605934 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea
3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.616625 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.630325 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.643288 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 
2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.654514 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.667829 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.678020 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.688412 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:06Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.702289 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.702347 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.702365 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.702390 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.702408 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.805145 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.805188 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.805204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.805224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.805233 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.908117 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.908189 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.908200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.908216 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:06 crc kubenswrapper[4664]: I1013 06:47:06.908228 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:06Z","lastTransitionTime":"2025-10-13T06:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.011043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.011347 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.011444 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.011544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.011644 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.114478 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.114834 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.115098 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.115225 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.115311 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.136873 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.137833 4664 scope.go:117] "RemoveContainer" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" Oct 13 06:47:07 crc kubenswrapper[4664]: E1013 06:47:07.138009 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.217824 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.218019 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.218082 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.218188 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.218253 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.321222 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.321279 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.321295 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.321318 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.321332 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.425130 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.425403 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.425468 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.425536 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.425592 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.528837 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.528878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.528952 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.528967 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.529014 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.632327 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.632382 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.632394 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.632414 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.632428 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.647108 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs"] Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.648449 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.651154 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.653084 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.672472 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.684710 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.697313 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.713076 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.727289 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.734711 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.734760 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.734772 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.734810 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.734826 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.743833 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.774418 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.795481 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.800955 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.801022 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.801064 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.801086 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcn42\" (UniqueName: \"kubernetes.io/projected/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-kube-api-access-wcn42\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.818709 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/opens
hift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 
06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.837076 4664 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f
4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.838231 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.838283 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.838294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.838307 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.838321 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.858862 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.877718 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.899167 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.902572 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.902629 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.902677 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.902707 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcn42\" (UniqueName: \"kubernetes.io/projected/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-kube-api-access-wcn42\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.903402 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.903510 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.914900 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.924606 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.935946 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcn42\" (UniqueName: \"kubernetes.io/projected/d33b17b0-8a4f-493a-a316-7d32a7f1ae45-kube-api-access-wcn42\") pod \"ovnkube-control-plane-749d76644c-r8wrs\" (UID: \"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.942628 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.942706 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.942722 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.942745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.942762 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:07Z","lastTransitionTime":"2025-10-13T06:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.946447 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.970065 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" Oct 13 06:47:07 crc kubenswrapper[4664]: I1013 06:47:07.972568 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:07Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045474 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045483 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045496 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045505 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045776 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.045870 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.045933 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.046207 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.046873 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.046944 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.148006 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.148032 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.148042 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.148054 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.148063 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.249346 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.249385 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.249396 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.249412 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.249424 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.351407 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.351477 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.351495 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.351526 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.351547 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.396883 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-9mgbt"]
Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.397285 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.397338 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.419906 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" event={"ID":"d33b17b0-8a4f-493a-a316-7d32a7f1ae45","Type":"ContainerStarted","Data":"996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.419958 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" event={"ID":"d33b17b0-8a4f-493a-a316-7d32a7f1ae45","Type":"ContainerStarted","Data":"cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.419970 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" event={"ID":"d33b17b0-8a4f-493a-a316-7d32a7f1ae45","Type":"ContainerStarted","Data":"e3b0ae0dcb5e6713532beb8dfd01b0912961ba5ae9f47a11728c4ccc8d030c5d"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.427233 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.446160 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff872
9d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.453557 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.453602 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.453611 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.453642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.453651 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.465022 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.481254 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.494816 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.507637 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.508698 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2nb7\" (UniqueName: \"kubernetes.io/projected/eba49cc7-48bb-4372-8eb3-c88513c591b9-kube-api-access-v2nb7\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.508814 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.521099 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.533766 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.550918 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.555725 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.555781 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.555822 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.555839 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.555848 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.571840 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.583667 4664 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.594728 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.610440 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.610479 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2nb7\" (UniqueName: \"kubernetes.io/projected/eba49cc7-48bb-4372-8eb3-c88513c591b9-kube-api-access-v2nb7\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.610540 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.610742 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:08 crc kubenswrapper[4664]: E1013 06:47:08.610854 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:09.110825566 +0000 UTC m=+36.798270778 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.626756 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.627015 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2nb7\" (UniqueName: \"kubernetes.io/projected/eba49cc7-48bb-4372-8eb3-c88513c591b9-kube-api-access-v2nb7\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.640823 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.657495 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.658429 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.658456 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.658468 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.658486 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.658497 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.669025 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.683150 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.699185 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.714051 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.728494 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.743856 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.761628 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.761681 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.761694 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.761716 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.761728 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.763722 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.782136 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff872
9d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.800317 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.814445 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.826749 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.838164 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d
6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.852639 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sys
tem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.863985 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.864033 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.864050 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.864072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.864089 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.868034 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.881862 4664 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.897345 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.917871 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.931830 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:08Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.966551 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.966593 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.966607 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.966625 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:08 crc kubenswrapper[4664]: I1013 06:47:08.966636 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:08Z","lastTransitionTime":"2025-10-13T06:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.069140 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.069186 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.069195 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.069209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.069217 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.116033 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.116251 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.116563 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:10.116545394 +0000 UTC m=+37.803990586 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.173271 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.173331 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.173348 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.173373 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.173394 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.265004 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.265171 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.265184 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.265197 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.265207 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.283126 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:09Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.287494 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.287571 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.287585 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.287600 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.287612 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.300759 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:09Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.305359 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.305386 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.305394 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.305435 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.305450 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.332234 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:09Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.338122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.338176 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.338193 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.338233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.338246 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.357274 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:09Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.362986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.363029 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.363047 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.363073 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.363091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.385845 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:09Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.386077 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.394845 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.394930 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.394956 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.394991 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.395011 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.498291 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.498358 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.498375 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.498402 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.498422 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.601914 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.601983 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.602002 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.602030 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.602049 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.602049 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.704247 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.704337 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.704359 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.704384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.704402 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.807052 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.807114 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.807135 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.807159 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.807177 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.823762 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.823938 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:47:25.823911425 +0000 UTC m=+53.511356657 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.910515 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.910578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.910602 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.910626 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.910644 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:09Z","lastTransitionTime":"2025-10-13T06:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.925353 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.925416 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.925450 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:09 crc kubenswrapper[4664]: I1013 06:47:09.925475 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
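The UnmountVolume.TearDown failure above is a separate problem from the network: the volume was provisioned by kubevirt.io.hostpath-provisioner, but that CSI driver has not (re)registered with the kubelet since the restart, so the unmount is parked with a 16s backoff. CSI drivers announce themselves through registration sockets that the kubelet's plugin watcher scans; a sketch of the check, assuming the stock kubelet registration directory:

    import os

    REG_DIR = "/var/lib/kubelet/plugins_registry"   # stock kubelet path, an assumption for this node
    DRIVER = "kubevirt.io.hostpath-provisioner"     # driver named in the TearDown error

    entries = os.listdir(REG_DIR) if os.path.isdir(REG_DIR) else []
    print("registration sockets:", entries)
    print("driver registered:", any(DRIVER in e for e in entries))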
"openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925618 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925679 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925725 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925747 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925704 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:25.925681919 +0000 UTC m=+53.613127121 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925846 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:25.925784972 +0000 UTC m=+53.613230184 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925871 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925875 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:25.925860774 +0000 UTC m=+53.613305986 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925921 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.925941 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:09 crc kubenswrapper[4664]: E1013 06:47:09.926040 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:25.926016648 +0000 UTC m=+53.613461880 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.013097 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.013168 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.013180 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.013199 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.013215 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.046528 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.046567 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.046549 4664 util.go:30] "No sandbox for pod can be found. 
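The burst of "object ... not registered" errors above does not mean the Secrets and ConfigMaps are missing from the API server; that wording comes from the kubelet's own watch-based object managers, which typically have not re-registered the referencing pods yet after a restart, so every MountVolume.SetUp is requeued with the 16s backoff seen in each entry. The objects can be confirmed server-side; a sketch assuming oc on PATH with a working kubeconfig:

    import subprocess

    OBJECTS = [  # namespace, kind, name -- all taken from the errors above
        ("openshift-network-diagnostics", "configmap", "kube-root-ca.crt"),
        ("openshift-network-diagnostics", "configmap", "openshift-service-ca.crt"),
        ("openshift-network-console", "secret", "networking-console-plugin-cert"),
        ("openshift-network-console", "configmap", "networking-console-plugin"),
    ]
    for ns, kind, name in OBJECTS:
        r = subprocess.run(["oc", "get", kind, name, "-n", ns],
                           capture_output=True, text=True)
        print(ns, name, "->", "present" if r.returncode == 0 else r.stderr.strip())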
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.046549 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.046659 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.046754 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.046858 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.047088 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.046995 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.116210 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.116301 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.116327 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.116364 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.116389 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.128068 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.128270 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:10 crc kubenswrapper[4664]: E1013 06:47:10.128417 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:12.128374732 +0000 UTC m=+39.815819954 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.219511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.219602 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.219631 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.219663 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.219689 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.323162 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.323213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.323225 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.323240 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.323253 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.426425 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.426492 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.426511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.426538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.426567 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.529060 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.529134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.529153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.529177 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.529203 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.633197 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.633900 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.633929 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.633954 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.633971 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.737089 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.737169 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.737193 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.737228 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.737265 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.841877 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.841952 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.841971 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.842002 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.842021 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.945309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.945379 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.945398 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.945426 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:10 crc kubenswrapper[4664]: I1013 06:47:10.945445 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:10Z","lastTransitionTime":"2025-10-13T06:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.048728 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.048861 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.048888 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.048925 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.048953 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.151973 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.152033 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.152043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.152058 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.152067 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.256483 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.256555 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.256577 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.256613 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.256638 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.361146 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.361212 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.361240 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.361274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.361299 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.464626 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.464690 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.464709 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.464740 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.464763 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.568915 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.568989 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.569009 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.569040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.569065 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.674276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.674423 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.674469 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.674554 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.674623 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.778715 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.778771 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.778789 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.778848 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.778866 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.881932 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.882074 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.882097 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.882130 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.882149 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.985945 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.986018 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.986040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.986070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:11 crc kubenswrapper[4664]: I1013 06:47:11.986091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:11Z","lastTransitionTime":"2025-10-13T06:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.046370 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.046624 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.047174 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.047353 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.047415 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.047620 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.047169 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.047777 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.090717 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.090862 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.090877 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.090900 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.090915 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.155895 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.156235 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:12 crc kubenswrapper[4664]: E1013 06:47:12.156377 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:16.156342069 +0000 UTC m=+43.843787291 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.194072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.194124 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.194141 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.194166 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.194188 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.298196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.298280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.298301 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.298330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.298350 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.401841 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.401921 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.401941 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.401971 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.401991 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.505631 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.505695 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.505712 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.505738 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.505760 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.609274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.609942 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.609986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.610016 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.610039 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.712511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.712542 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.712553 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.712566 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.712575 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.816308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.816667 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.816847 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.816966 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.817052 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.920626 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.920702 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.920722 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.920754 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:12 crc kubenswrapper[4664]: I1013 06:47:12.920775 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:12Z","lastTransitionTime":"2025-10-13T06:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.024007 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.024082 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.024110 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.024147 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.024171 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.070772 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.088108 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.105654 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.127027 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.127119 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.127143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.127170 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.127228 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.142230 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.177049 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.197085 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.218329 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.230922 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.231001 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.231025 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.231057 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.231081 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.246019 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.299932 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.323908 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.333262 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.333309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.333324 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 
13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.333342 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.333352 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.347244 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"
name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.376807 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.407028 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.419091 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.432966 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.436128 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.436191 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.436208 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.436265 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.436282 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.447474 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.461829 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:13Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.539041 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.539090 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.539101 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.539115 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.539124 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.642277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.642360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.642421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.642458 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.642481 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.745235 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.745320 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.745335 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.745353 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.745365 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.848907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.848947 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.848960 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.848978 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.848991 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.951813 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.951863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.951873 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.951888 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:13 crc kubenswrapper[4664]: I1013 06:47:13.951900 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:13Z","lastTransitionTime":"2025-10-13T06:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.046967 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.047007 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.047065 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:14 crc kubenswrapper[4664]: E1013 06:47:14.047192 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:14 crc kubenswrapper[4664]: E1013 06:47:14.047470 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.047220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:14 crc kubenswrapper[4664]: E1013 06:47:14.047699 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:14 crc kubenswrapper[4664]: E1013 06:47:14.047616 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.054689 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.054729 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.054743 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.054762 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.054779 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.158045 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.158099 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.158111 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.158130 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.158143 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.262068 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.262140 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.262158 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.262184 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.262205 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.364850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.364931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.364950 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.364974 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.364992 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.467745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.467814 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.467827 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.467846 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.467859 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.570447 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.570517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.570536 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.570559 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.570576 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.673155 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.673201 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.673213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.673228 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.673237 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.776309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.776373 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.776393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.776427 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.776450 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.879627 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.879707 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.879725 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.879756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.879774 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.983752 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.983864 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.983887 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.983918 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:14 crc kubenswrapper[4664]: I1013 06:47:14.983943 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:14Z","lastTransitionTime":"2025-10-13T06:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.090334 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.090424 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.090451 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.090482 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.090688 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.194139 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.194233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.194262 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.194296 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.194318 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.298030 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.298103 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.298122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.298153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.298175 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.402152 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.402193 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.402202 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.402216 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.402228 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.505036 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.505119 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.505141 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.505172 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.505193 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.608270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.608333 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.608345 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.608365 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.608381 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.712560 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.712641 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.712661 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.712693 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.712711 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.816578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.816660 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.816685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.816722 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.816748 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.920602 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.920673 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.920698 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.920739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:15 crc kubenswrapper[4664]: I1013 06:47:15.920765 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:15Z","lastTransitionTime":"2025-10-13T06:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.024215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.024257 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.024268 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.024284 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.024294 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.046864 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.046894 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.046941 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.046901 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.047100 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.047187 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.047265 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
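Note: every NetworkPluginNotReady entry above boils down to one filesystem check. A rough, illustrative Go approximation (not the kubelet's actual code; the extension list follows the usual libcni convention, and the directory is taken from the log message):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // Illustrative approximation of the readiness probe behind the log
    // lines above: the runtime looks for CNI network configs in a conf
    // dir and reports NetworkPluginNotReady while none exist.
    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // path from the log message
        entries, err := os.ReadDir(confDir)
        if err != nil {
            fmt.Println("cannot read conf dir:", err)
            return
        }
        var confs []string
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // libcni-style extensions
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", confDir)
            return
        }
        fmt.Println("CNI configs found:", confs)
    }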
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.047324 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.126879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.126909 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.126917 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.126931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.126941 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.212583 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.212730 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:16 crc kubenswrapper[4664]: E1013 06:47:16.212779 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:24.212766078 +0000 UTC m=+51.900211270 (durationBeforeRetry 8s). 
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.228519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.228549 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.228578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.228592 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.228602 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.331734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.331776 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.331785 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.331823 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.331835 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
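Note: the "(durationBeforeRetry 8s)" in the mount failure above is exponential backoff on the failing volume operation. A sketch under assumed kubelet defaults (500ms base doubling toward a ~2m cap; under those assumptions the 8s seen here would be the fifth consecutive failure):

    package main

    import (
        "fmt"
        "time"
    )

    // Sketch of the retry gating visible above ("No retries permitted
    // until ... durationBeforeRetry 8s"): each failure doubles the wait.
    // The 500ms base and ~2m cap are assumptions for illustration.
    func main() {
        const (
            initial = 500 * time.Millisecond
            maxWait = 2*time.Minute + 2*time.Second
        )
        d := initial
        for failures := 1; failures <= 10; failures++ {
            fmt.Printf("failure %2d -> retry in %v\n", failures, d)
            if d *= 2; d > maxWait {
                d = maxWait
            }
        }
    }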
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.434048 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.434079 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.434088 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.434102 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.434113 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.536354 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.536385 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.536393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.536406 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.536414 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.638820 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.638853 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.638862 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.638901 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.638914 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.742090 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.742175 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.742198 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.742230 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.742253 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.844728 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.844765 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.844774 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.844787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.844810 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.947134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.947201 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.947215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.947610 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:16 crc kubenswrapper[4664]: I1013 06:47:16.947655 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:16Z","lastTransitionTime":"2025-10-13T06:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.050461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.050535 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.050545 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.050564 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.050578 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.153292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.153321 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.153329 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.153342 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.153351 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.255703 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.255744 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.255753 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.255767 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.255776 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.358116 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.358156 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.358166 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.358184 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.358198 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.460443 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.460734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.460817 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.460879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.460957 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.563529 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.563579 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.563590 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.563608 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.563617 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.665487 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.665521 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.665531 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.665547 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.665558 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.767644 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.767683 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.767691 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.767704 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.767714 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.869700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.869746 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.869758 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.869774 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.869830 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.971919 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.971969 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.971985 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.972009 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:17 crc kubenswrapper[4664]: I1013 06:47:17.972022 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:17Z","lastTransitionTime":"2025-10-13T06:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.046143 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.046143 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:18 crc kubenswrapper[4664]: E1013 06:47:18.046272 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.046164 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.046165 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:18 crc kubenswrapper[4664]: E1013 06:47:18.046558 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:18 crc kubenswrapper[4664]: E1013 06:47:18.046631 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:18 crc kubenswrapper[4664]: E1013 06:47:18.046700 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.046871 4664 scope.go:117] "RemoveContainer" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.081221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.081523 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.081660 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.081821 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.081980 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.184516 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.184785 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.184814 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.184828 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.184837 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.287664 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.287707 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.287718 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.287736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.287749 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.390461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.390521 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.390535 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.390556 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.390571 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.455198 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/1.log"
Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.457788 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0"}
Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.459042 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.477899 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z"
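Note: the webhook failure logged above is a plain certificate-expiry problem (NotAfter 2025-08-24, current time 2025-10-13). A small hypothetical Go probe against the endpoint named in the log would print the serving certificate's validity window directly:

    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    // Diagnostic sketch for the x509 failures above: connect to the
    // webhook endpoint from the log and print the serving certificate's
    // validity window. InsecureSkipVerify is deliberate here; we want to
    // inspect the expired cert rather than fail verification.
    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        certs := conn.ConnectionState().PeerCertificates
        if len(certs) == 0 {
            fmt.Println("no peer certificate presented")
            return
        }
        cert := certs[0]
        fmt.Println("subject:  ", cert.Subject)
        fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
        fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
        if time.Now().After(cert.NotAfter) {
            fmt.Println("certificate is expired, matching the kubelet error")
        }
    }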
Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.489048 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.492916 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.492965 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.492977 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.492997 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.493010 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.501831 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.515643 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.528460 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.546249 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.572065 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.590134 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.595277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.595305 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.595327 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.595343 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.595355 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.604138 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.620875 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.634021 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d
6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.648036 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sys
tem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.661422 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.672147 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.685789 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.698161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.698206 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.698215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.698234 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.698246 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.699295 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.711307 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:18Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.801018 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.801061 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.801071 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.801090 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.801102 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.904265 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.904329 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.904347 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.904376 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:18 crc kubenswrapper[4664]: I1013 06:47:18.904396 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:18Z","lastTransitionTime":"2025-10-13T06:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.008000 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.008079 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.008134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.008176 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.008203 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.111612 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.111668 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.111685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.111711 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.111729 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.214892 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.214959 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.214981 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.215013 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.215036 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.317909 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.317993 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.318021 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.318581 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.318660 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.422152 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.422214 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.422232 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.422259 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.422279 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.465692 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/2.log" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.467985 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/1.log" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.472746 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0" exitCode=1 Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.472878 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.472980 4664 scope.go:117] "RemoveContainer" containerID="e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.474865 4664 scope.go:117] "RemoveContainer" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.476947 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.501919 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.525204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.525259 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.525275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.525328 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.525349 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.526276 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.541651 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.560536 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.570719 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.570764 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.570783 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.570839 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.570864 4664 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.581075 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.591423 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.596551 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.596628 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.596650 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.596684 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.596704 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.602750 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.617994 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 
2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.622394 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.625325 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.625393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.625417 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.625451 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.625475 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.645845 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.648362 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.653907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.653953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.653962 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.653977 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.653989 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.669560 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 
2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674190 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674241 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674254 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674278 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674293 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.674584 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e0f2c6e6dcc217625f8debed161767a19d7ff8729d71bff9a6ad5f0382c2f8c3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:04Z\\\",\\\"message\\\":\\\") from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1013 06:47:04.309259 5997 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309324 5997 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309400 5997 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.309627 5997 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1013 06:47:04.309968 5997 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1013 06:47:04.310140 5997 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1013 06:47:04.310598 5997 ovnkube.go:599] Stopped ovnkube\\\\nI1013 06:47:04.310628 5997 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:04.310689 5997 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for 
anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\
\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.687858 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 
2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: E1013 06:47:19.688029 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.689997 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.690038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.690055 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.690074 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.690089 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.697190 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-
v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e
49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.720308 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.733746 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.747639 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.760271 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.774092 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.786629 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.796689 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.796734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.796745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.796761 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.796772 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.811309 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:19Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.900937 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.901046 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.901065 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.901094 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:19 crc kubenswrapper[4664]: I1013 06:47:19.901115 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:19Z","lastTransitionTime":"2025-10-13T06:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.003995 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.004053 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.004064 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.004083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.004095 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.046692 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:20 crc kubenswrapper[4664]: E1013 06:47:20.046935 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.047212 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:20 crc kubenswrapper[4664]: E1013 06:47:20.047382 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.047465 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:20 crc kubenswrapper[4664]: E1013 06:47:20.047526 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.047553 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:20 crc kubenswrapper[4664]: E1013 06:47:20.047645 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.107967 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.108016 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.108028 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.108044 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.108055 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.210216 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.210270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.210287 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.210309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.210323 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.312934 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.312982 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.312993 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.313010 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.313022 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.416311 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.416380 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.416400 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.416422 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.416437 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.486515 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/2.log" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.492658 4664 scope.go:117] "RemoveContainer" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0" Oct 13 06:47:20 crc kubenswrapper[4664]: E1013 06:47:20.493013 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.515457 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.521025 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.521089 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.521104 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.521125 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.521146 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.553870 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.578248 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.594781 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.617738 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.623318 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.623354 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.623367 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.623385 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.623399 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.631988 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.646268 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.658896 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.673908 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.687244 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.700832 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.715356 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.726384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.726454 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.726474 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.726506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.726531 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.728837 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.739769 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.753641 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.765949 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.776103 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:20Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.829217 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.829260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.829269 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.829303 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.829314 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.931868    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.932110    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.932215    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.932300    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:20 crc kubenswrapper[4664]: I1013 06:47:20.932391    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:20Z","lastTransitionTime":"2025-10-13T06:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.035071    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.035106    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.035114    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.035127    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.035136    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.137155    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.137575    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.137731    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.137940    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.138082    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.240447    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.240497    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.240507    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.240520    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.240531    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.342962    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.343002    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.343013    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.343029    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.343043    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.445232    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.445266    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.445278    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.445297    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.445309    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.549224    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.549310    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.549333    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.549363    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.549385    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.653154    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.653246    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.653272    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.653310    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.653335    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.757243    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.757328    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.757352    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.757386    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.757408    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.860590    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.860645    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.860659    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.860680    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.860694    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.963478    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.963541    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.963557    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.963577    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:21 crc kubenswrapper[4664]: I1013 06:47:21.964016    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:21Z","lastTransitionTime":"2025-10-13T06:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.046586    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.046742    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:22 crc kubenswrapper[4664]: E1013 06:47:22.046915    4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.046752    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:22 crc kubenswrapper[4664]: E1013 06:47:22.047057    4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:22 crc kubenswrapper[4664]: E1013 06:47:22.047078    4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.047227    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:22 crc kubenswrapper[4664]: E1013 06:47:22.047334    4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.067436    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.067491    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.067505    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.067526    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.067542    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.170644    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.170704    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.170722    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.170748    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.170766    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.273186    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.273255    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.273268    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.273284    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.273316    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.376821    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.376877    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.376891    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.376915    4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.376928    4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.479767 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.479889 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.479909 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.479937 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.479955 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.582942 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.582994 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.583012 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.583038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.583056 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.686069 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.686122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.686139 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.686163 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.686178 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.788264 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.788315 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.788329 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.788346 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.788358 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.890930 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.890998 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.891012 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.891028 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.891044 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.994241 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.994317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.994356 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.994394 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:22 crc kubenswrapper[4664]: I1013 06:47:22.994416 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:22Z","lastTransitionTime":"2025-10-13T06:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.061349 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.077235 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.092111 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.098313 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.098368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.098390 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.098416 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.098439 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.108727 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.125658 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.164206 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.188532 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.201188 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.201221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc 
kubenswrapper[4664]: I1013 06:47:23.201229 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.201243 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.201253 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.206111 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.219617 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.235655 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.258256 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.287941 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/o
s-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.302191 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.303783 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.303849 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.303865 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.303889 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.303907 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.321132 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.336469 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.351925 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.364633 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:23Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.406148 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.406197 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.406209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.406228 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.406239 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.508885 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.508923 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.508939 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.508959 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.508973 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.612124 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.612246 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.612267 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.612291 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.612312 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.716024 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.716096 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.716120 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.716154 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.716179 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.819300 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.819364 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.819379 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.819401 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.819417 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.922756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.922823 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.922835 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.922851 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:23 crc kubenswrapper[4664]: I1013 06:47:23.922864 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:23Z","lastTransitionTime":"2025-10-13T06:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
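Has your network provider started?"}

The condition payload that setters.go:603 prints on each of these heartbeats is an ordinary Kubernetes NodeCondition. For reference only, here is a minimal Go sketch that rebuilds the same Ready=False condition using the public API types; this is an illustration assuming the k8s.io/api and k8s.io/apimachinery modules, not the kubelet's own setters.go code path.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	// Rebuild the condition={...} payload logged by setters.go above.
    	now := metav1.NewTime(time.Now())
    	cond := corev1.NodeCondition{
    		Type:               corev1.NodeReady,
    		Status:             corev1.ConditionFalse,
    		LastHeartbeatTime:  now,
    		LastTransitionTime: now,
    		Reason:             "KubeletNotReady",
    		Message: "container runtime network not ready: NetworkReady=false " +
    			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
    			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
    			"Has your network provider started?",
    	}
    	out, _ := json.Marshal(cond)
    	fmt.Println(string(out)) // same shape as the condition={...} text in the log
    }

Each heartbeat re-derives and logs the same condition because the network plugin is still not ready; the repetition below reflects the status loop running again, not a new state change.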
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.024467 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.024503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.024514 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.024530 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.024541 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.046595 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.046594 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.046601 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.046609 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.046719 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.046925 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.046974 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.047015 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.126353 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.126392 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.126402 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.126418 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.126429 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.228282 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.228317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.228328 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.228344 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.228354 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.301868 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.302023 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 13 06:47:24 crc kubenswrapper[4664]: E1013 06:47:24.302081 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:40.3020676 +0000 UTC m=+67.989512792 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.330867 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.330896 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.330904 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.330917 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.330926 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
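Has your network provider started?"}

The "No retries permitted until ... (durationBeforeRetry 16s)" line above is nestedpendingoperations.go pacing MountVolume retries with a doubling backoff. A 16s step is consistent with a 500ms initial delay doubled once per consecutive failure. The sketch below illustrates that pacing; the initial and cap constants are assumptions modeled on kubelet's exponentialbackoff package, not values read from this log.

    package main

    import (
    	"fmt"
    	"time"
    )

    // Assumed defaults in the style of kubelet's exponentialbackoff package;
    // the log only tells us the current step is 16s.
    const (
    	initialDurationBeforeRetry = 500 * time.Millisecond
    	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
    )

    func main() {
    	d := initialDurationBeforeRetry
    	now := time.Now()
    	for failure := 1; failure <= 9; failure++ {
    		// After each failed attempt, no retry is permitted until now+d.
    		fmt.Printf("failure %d: no retries permitted until %s (durationBeforeRetry %s)\n",
    			failure, now.Add(d).Format(time.RFC3339), d)
    		d *= 2
    		if d > maxDurationBeforeRetry {
    			d = maxDurationBeforeRetry
    		}
    	}
    }

With these constants the sequence runs 500ms, 1s, 2s, 4s, 8s, 16s, ..., so the 16s wait above would correspond to roughly the sixth consecutive failure of this mount.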
Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.433116 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.433149 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.433163 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.433176 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.433184 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.535254 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.535295 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.535310 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.535330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.535345 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.637753 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.637857 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.637880 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.637907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.638403 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.741122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.741161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.741172 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.741187 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.741197 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.844349 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.844397 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.844408 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.844424 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.844439 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.946587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.946631 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.946640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.946654 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:24 crc kubenswrapper[4664]: I1013 06:47:24.946666 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:24Z","lastTransitionTime":"2025-10-13T06:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.049290 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.049333 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.049350 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.049377 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.049394 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.151783 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.151867 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.151883 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.151906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.151923 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.254443 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.254494 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.254506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.254527 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.254540 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.357461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.357527 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.357546 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.357625 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.357647 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.460844 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.460879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.460889 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.460903 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.460914 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.562751 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.562808 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.562821 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.562837 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.562849 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.584148 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.598483 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.598776 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\
\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.617217 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.630511 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.642446 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.653724 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.665371 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.665405 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.665431 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.665448 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.665461 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.667824 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.688860 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.704894 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.722301 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.734870 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.755465 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.768519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.768567 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.768578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.768595 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.768607 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.779494 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.795995 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.807187 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.826636 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.844413 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.862864 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/o
s-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:25Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.871215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.871277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.871290 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.871306 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.871318 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.918559 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:47:25 crc kubenswrapper[4664]: E1013 06:47:25.918915 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:47:57.918870339 +0000 UTC m=+85.606315641 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.974892 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.974953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.974966 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.974988 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:25 crc kubenswrapper[4664]: I1013 06:47:25.975002 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:25Z","lastTransitionTime":"2025-10-13T06:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.019947 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.020022 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.020057 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.020082 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020241 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020300 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020331 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:58.020305063 +0000 UTC m=+85.707750275 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020358 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:58.020343764 +0000 UTC m=+85.707788966 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020243 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020437 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020480 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020583 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:58.020555799 +0000 UTC m=+85.708001041 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020664 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020684 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020702 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.020764 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:47:58.020747785 +0000 UTC m=+85.708193027 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.046492 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.046562 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.046511 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.046849 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.046696 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.046923 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.047003 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:26 crc kubenswrapper[4664]: E1013 06:47:26.047078 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.077115 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.077151 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.077164 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.077183 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.077194 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.180285 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.180330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.180345 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.180360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.180371 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.283043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.283075 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.283083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.283095 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.283104 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.385600 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.385632 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.385644 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.385661 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.385672 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.488153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.488205 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.488215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.488231 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.488243 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.591339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.591388 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.591399 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.591418 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.591429 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.694365 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.694433 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.694447 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.694491 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.694535 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.802685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.802758 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.802776 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.802834 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.802853 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.905272 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.905331 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.905349 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.905373 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:26 crc kubenswrapper[4664]: I1013 06:47:26.905390 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:26Z","lastTransitionTime":"2025-10-13T06:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.008340 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.008404 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.008421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.008449 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.008466 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.112050 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.112137 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.112153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.112174 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.112189 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.214261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.214766 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.214968 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.215044 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.215105 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.318580 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.318622 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.318634 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.318649 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.318660 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.421740 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.421832 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.421842 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.421860 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.421870 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.524357 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.524445 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.524471 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.524511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.524536 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.627628 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.627693 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.627709 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.627736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.627751 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.730496 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.730557 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.730570 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.730592 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.730605 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.834699 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.834773 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.834826 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.834851 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.834870 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.939020 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.939112 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.939138 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.939171 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:27 crc kubenswrapper[4664]: I1013 06:47:27.939192 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:27Z","lastTransitionTime":"2025-10-13T06:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.042578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.042646 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.042665 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.042692 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.042714 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.046854 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.046935 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.046868 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.046868 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:28 crc kubenswrapper[4664]: E1013 06:47:28.047033 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:28 crc kubenswrapper[4664]: E1013 06:47:28.047175 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:28 crc kubenswrapper[4664]: E1013 06:47:28.047304 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:28 crc kubenswrapper[4664]: E1013 06:47:28.047427 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.145615 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.145682 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.145701 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.145729 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.145742 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.248198 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.248258 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.248269 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.248302 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.248312 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.351072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.351137 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.351150 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.351167 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.351178 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.454358 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.454431 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.454452 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.454478 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.454496 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.557185 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.557261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.557293 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.557327 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.557349 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.660845 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.660905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.660918 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.660947 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.660967 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.764503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.764591 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.764615 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.764692 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.764717 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.868260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.868325 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.868346 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.868374 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.868392 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.972406 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.972465 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.972476 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.972498 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:28 crc kubenswrapper[4664]: I1013 06:47:28.972510 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:28Z","lastTransitionTime":"2025-10-13T06:47:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.075750 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.075815 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.075825 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.075844 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.075855 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.178841 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.178907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.178921 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.178947 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.178964 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.282719 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.282783 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.282823 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.282854 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.282872 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.386274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.386339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.386355 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.386377 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.386393 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.489289 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.489348 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.489367 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.489393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.489413 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.593037 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.593110 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.593128 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.593154 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.593183 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.697307 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.697375 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.697393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.697424 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.697445 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.800591 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.800653 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.800670 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.800696 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.800739 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.904868 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.904919 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.904929 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.904951 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:29 crc kubenswrapper[4664]: I1013 06:47:29.904967 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:29Z","lastTransitionTime":"2025-10-13T06:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.009023 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.009059 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.009067 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.009083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.009094 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.023123 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.023167 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.023183 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.023205 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.023224 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.046616 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.046791 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.046755 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.047004 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.047005 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.047261 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.047563 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.047724 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.049195 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:30Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.055352 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.055436 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.055458 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.055481 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.055499 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.077305 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:30Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.083205 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.083288 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.083308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.083335 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.083353 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.106599 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:30Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.111873 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.111957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.111978 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.112005 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.112055 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.133517 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:30Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.140210 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.140257 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.140275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.140302 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.140320 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.163625 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:30Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:30 crc kubenswrapper[4664]: E1013 06:47:30.163952 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.166636 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.166683 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.166700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.166721 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.166738 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.270298 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.270677 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.270984 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.271206 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.271374 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.374273 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.374922 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.374943 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.374987 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.375002 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.480181 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.480221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.480234 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.480261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.480278 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.583721 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.583836 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.583856 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.583886 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.583905 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.687411 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.687497 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.687515 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.687546 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.687568 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.791322 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.791460 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.791481 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.791503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.791520 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.895731 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.895856 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.895875 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.895936 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.895959 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.999333 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.999410 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.999428 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.999473 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:30 crc kubenswrapper[4664]: I1013 06:47:30.999496 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:30Z","lastTransitionTime":"2025-10-13T06:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.102244 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.102370 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.102392 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.102492 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.102513 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.212974 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.213049 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.213070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.213095 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.213112 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.316980 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.317033 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.317052 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.317076 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.317095 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.420973 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.421038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.421056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.421081 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.421101 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.524359 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.524445 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.524472 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.524510 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.524531 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.629139 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.629221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.629243 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.629270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.629289 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.732582 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.732652 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.732670 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.732696 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.732715 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.836182 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.836269 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.836293 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.836327 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.836351 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.939811 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.939875 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.939891 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.939913 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:31 crc kubenswrapper[4664]: I1013 06:47:31.939928 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:31Z","lastTransitionTime":"2025-10-13T06:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.044126 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.044184 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.044196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.044213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.044229 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.046772 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:32 crc kubenswrapper[4664]: E1013 06:47:32.047136 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.047193 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.047270 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:32 crc kubenswrapper[4664]: E1013 06:47:32.047441 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.047704 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:32 crc kubenswrapper[4664]: E1013 06:47:32.048001 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:32 crc kubenswrapper[4664]: E1013 06:47:32.048294 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.050153 4664 scope.go:117] "RemoveContainer" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0" Oct 13 06:47:32 crc kubenswrapper[4664]: E1013 06:47:32.050445 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.148196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.148332 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.148353 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.148380 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.148399 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.252401 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.252463 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.252481 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.252506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.252525 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.355857 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.355931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.355958 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.355986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.356006 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.459519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.459571 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.459587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.459610 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.459627 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.563135 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.563210 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.563228 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.563255 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.563275 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.666992 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.667078 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.667094 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.667123 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.667146 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.769855 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.769955 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.769986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.770032 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.770061 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.873646 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.873732 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.873756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.873787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.873853 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.975783 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.975850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.975863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.975881 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:32 crc kubenswrapper[4664]: I1013 06:47:32.975895 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:32Z","lastTransitionTime":"2025-10-13T06:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.069576 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.079323 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.079368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.079384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.079408 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.079428 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.089421 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.108170 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.126659 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.157435 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184251 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184343 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184357 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184379 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184414 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.184601 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.204529 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.226097 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.247301 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.273390 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.288469 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.288662 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.288687 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.288737 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.288756 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.291729 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:
46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.306246 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.321404 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.336534 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.352322 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.367004 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.387706 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.391636 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.391685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.391704 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.391726 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.391742 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.401043 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:33Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.493985 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.494030 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.494039 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.494054 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.494064 4664 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.597589 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.597662 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.597680 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.597710 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.597730 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.700931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.700973 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.700985 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.701005 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.701021 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.804585 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.804692 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.804720 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.804748 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.804768 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.906659 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.906899 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.907055 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.907150 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:33 crc kubenswrapper[4664]: I1013 06:47:33.907238 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:33Z","lastTransitionTime":"2025-10-13T06:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.011077 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.011128 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.011141 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.011159 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.011174 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.046951 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.047003 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:34 crc kubenswrapper[4664]: E1013 06:47:34.047129 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.046963 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:34 crc kubenswrapper[4664]: E1013 06:47:34.047288 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:34 crc kubenswrapper[4664]: E1013 06:47:34.047371 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.047625 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:34 crc kubenswrapper[4664]: E1013 06:47:34.047965 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.114128 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.114437 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.114612 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.114773 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.115009 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.218680 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.218739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.218752 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.218775 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.218820 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.321336 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.321386 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.321403 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.321423 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.321436 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.424480 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.424553 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.424576 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.424606 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.424629 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.527836 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.527883 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.527924 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.527943 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.527958 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.630935 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.631020 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.631048 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.631083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.631102 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.734904 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.734971 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.734988 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.735014 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.735031 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.842089 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.842163 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.842186 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.842214 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.842233 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.945209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.945249 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.945260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.945276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:34 crc kubenswrapper[4664]: I1013 06:47:34.945288 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:34Z","lastTransitionTime":"2025-10-13T06:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.048133 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.048166 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.048178 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.048196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.048207 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.151089 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.151121 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.151132 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.151148 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.151161 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.254084 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.254143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.254155 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.254174 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.254188 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.356584 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.356628 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.356643 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.356664 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.356681 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.459523 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.459582 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.459605 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.459633 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.459657 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.562428 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.562479 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.562495 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.562519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.562534 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.664356 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.664506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.664652 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.664694 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.664774 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.768053 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.768106 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.768122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.768145 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.768161 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.871682 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.871728 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.871744 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.871767 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.871784 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.975181 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.975247 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.975270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.975295 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:35 crc kubenswrapper[4664]: I1013 06:47:35.975313 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:35Z","lastTransitionTime":"2025-10-13T06:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.046325 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:36 crc kubenswrapper[4664]: E1013 06:47:36.046497 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.046773 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:36 crc kubenswrapper[4664]: E1013 06:47:36.046961 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.057444 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:36 crc kubenswrapper[4664]: E1013 06:47:36.057662 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.057450 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:36 crc kubenswrapper[4664]: E1013 06:47:36.058082 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.078123 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.078163 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.078172 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.078185 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.078194 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.181092 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.181144 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.181156 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.181174 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.181185 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.283098 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.283129 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.283138 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.283150 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.283159 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.394081 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.394132 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.394144 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.394162 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.394176 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.496467 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.496516 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.496531 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.496550 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.496567 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.600021 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.600073 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.600087 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.600103 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.600114 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.702859 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.702914 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.702931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.702953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.702971 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.805440 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.805521 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.805544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.805568 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.805586 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.909227 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.909265 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.909275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.909292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:36 crc kubenswrapper[4664]: I1013 06:47:36.909306 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:36Z","lastTransitionTime":"2025-10-13T06:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.012292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.012331 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.012344 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.012360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.012372 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.114654 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.114747 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.114764 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.114787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.114837 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.217458 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.217494 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.217502 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.217514 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.217525 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.320351 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.320401 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.320417 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.320438 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.320455 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.422678 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.422716 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.422743 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.422756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.422765 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.525202 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.525261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.525280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.525303 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.525319 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.628379 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.628425 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.628436 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.628452 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.628462 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.731388 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.731430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.731438 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.731453 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.731461 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.834083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.834137 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.834147 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.834160 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.834168 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.936980 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.937017 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.937026 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.937041 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:37 crc kubenswrapper[4664]: I1013 06:47:37.937054 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:37Z","lastTransitionTime":"2025-10-13T06:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.039295 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.039344 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.039368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.039388 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.039400 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.046671 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.046673 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.046700 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.046772 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:38 crc kubenswrapper[4664]: E1013 06:47:38.046783 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:38 crc kubenswrapper[4664]: E1013 06:47:38.046921 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:38 crc kubenswrapper[4664]: E1013 06:47:38.046989 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
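[Editor's note: the setters.go:603 entries above are kubelet serializing a core/v1 NodeCondition into the condition={...} payload. A minimal Go sketch that reproduces that payload shape; the nodeCondition struct is a local stand-in for the real k8s.io/api core/v1 type, defined here only so the sketch runs with the standard library alone.]

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the fields kubelet prints in the condition={...}
// payload above (a core/v1 NodeCondition); local stand-in, illustration only.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	// Prints JSON matching the condition={...} text in the log lines above.
	b, _ := json.Marshal(c)
	fmt.Println("condition=" + string(b))
}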
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:38 crc kubenswrapper[4664]: E1013 06:47:38.047064 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.141294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.141332 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.141341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.141371 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.141382 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.245467 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.245517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.245526 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.245545 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.245555 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.347575 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.347621 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.347640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.347656 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.347668 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.450020 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.450073 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.450085 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.450102 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.450113 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.552817 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.552857 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.552867 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.552881 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.552893 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.655496 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.655554 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.655573 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.655600 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.655616 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.757750 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.757826 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.757843 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.757858 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.757868 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.860739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.860777 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.860791 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.860828 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.860847 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.963294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.963330 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.963339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.963353 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:38 crc kubenswrapper[4664]: I1013 06:47:38.963362 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:38Z","lastTransitionTime":"2025-10-13T06:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.065815 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.065844 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.065852 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.065863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.065872 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.168341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.168365 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.168373 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.168384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.168392 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.269972 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.270013 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.270022 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.270034 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.270041 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.372477 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.372520 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.372531 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.372548 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.372559 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.474824 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.474858 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.474866 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.474878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.474887 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.577146 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.577177 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.577185 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.577198 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.577207 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.689457 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.689481 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.689488 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.689500 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.689508 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.791661 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.791725 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.791744 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.791768 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.791785 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.893572 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.893597 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.893604 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.893617 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.893626 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.995836 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.995865 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.995872 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.995883 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:39 crc kubenswrapper[4664]: I1013 06:47:39.995893 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:39Z","lastTransitionTime":"2025-10-13T06:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.046346 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.046470 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.046494 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
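[Editor's note: the "No sandbox for pod can be found" / "Error syncing pod" loop persists because the runtime finds no network configuration where the message says to look. A small Go diagnostic sketch, assuming the conventional libcni candidate extensions (.conf, .conflist, .json); this is not CRI-O's actual config loader.]

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Scans the CNI config directory named in the kubelet message and reports
// whether any network-configuration candidate exists. Diagnostic sketch only.
func main() {
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		os.Exit(1)
	}
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("candidate CNI config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		// Matches the condition in the log: no CNI configuration file found,
		// so the runtime reports NetworkReady=false and pod sandboxes wait.
		fmt.Println("no CNI configuration file in", dir)
		os.Exit(2)
	}
}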
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.046707 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.046908 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.046984 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.046910 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.047167 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.098651 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.098692 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.098703 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.098717 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.098728 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.201769 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.201862 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.201880 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.201907 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.201925 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.223313 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.223352 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.223368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.223390 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.223408 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.237941 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:40Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.243007 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.243046 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.243056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.243078 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.243091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.256104 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:40Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.262045 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.262120 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.262167 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.262191 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.262206 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.285481 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... duplicate image list elided; byte-identical to the image list in the first patch attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:40Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.290072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.290115 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.290132 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.290151 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.290169 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.307557 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... duplicate image list elided; byte-identical to the image list in the first patch attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:40Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.312197 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.312224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.312233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.312274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.312287 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.324595 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... duplicate image list elided; byte-identical to the image list in the first patch attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:40Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.324774 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.326277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.326300 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.326307 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.326320 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.326329 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.378958 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.379127 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:40 crc kubenswrapper[4664]: E1013 06:47:40.379187 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:48:12.379172682 +0000 UTC m=+100.066617874 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.428745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.428813 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.428825 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.428843 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.428854 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.533252 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.533320 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.533343 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.533370 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:40 crc kubenswrapper[4664]: I1013 06:47:40.533391 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:40Z","lastTransitionTime":"2025-10-13T06:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.046074 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:42 crc kubenswrapper[4664]: E1013 06:47:42.046218 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.046678 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:42 crc kubenswrapper[4664]: E1013 06:47:42.046751 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.046825 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:42 crc kubenswrapper[4664]: E1013 06:47:42.046894 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.046950 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:42 crc kubenswrapper[4664]: E1013 06:47:42.047012 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.502302 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.502339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.502349 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.502368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.502377 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:42Z","lastTransitionTime":"2025-10-13T06:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
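Every heartbeat in the run above records the same four node conditions and the same Ready=False transition, and each one points at a single root cause: the container runtime reports NetworkReady=false because nothing has yet written a CNI configuration file into /etc/kubernetes/cni/net.d/. As a triage aid, here is a minimal Go sketch (illustrative only, not kubelet source; the file name cnicheck.go and the exact extension filter are assumptions) of the check an operator could run on the node to confirm whether that directory holds a loadable CNI config:

```go
// cnicheck.go - a minimal triage sketch, not kubelet source: it reports
// whether a CNI configuration directory contains any config files, which
// is the condition the "NetworkPluginNotReady" records above keep failing.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path taken from the log message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		os.Exit(1)
	}
	found := 0
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// CNI loaders pick up .conf, .conflist and .json files; we assume
		// the same filter here and ignore everything else.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file found - node will stay NotReady")
	}
}
```

Until the network plugin (multus and ovnkube-node in this cluster) drops its config there, the check prints the fallback line and the kubelet keeps republishing NodeNotReady, which is exactly the loop visible above.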
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.568516 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/0.log"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.568561 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" containerID="cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37" exitCode=1
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.568589 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerDied","Data":"cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37"}
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.568932 4664 scope.go:117] "RemoveContainer" containerID="cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.582311 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.595295 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.606183 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.606246 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.606257 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.606273 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.606284 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:42Z","lastTransitionTime":"2025-10-13T06:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.607900 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z"
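The status-patch failures directly above and below all break identically: the kubelet's PATCH is intercepted by the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, and the TLS handshake fails because the webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-10-13T06:47:42Z. Here is a short Go probe in the same spirit (an illustrative sketch, not part of kubelet; certcheck.go is a hypothetical name) that pulls such an endpoint's certificate and reports its validity window:

```go
// certcheck.go - an illustrative diagnostic, not part of kubelet: it fetches
// the serving certificate of the webhook endpoint named in the failures above
// and compares its validity window against the current time.
package main

import (
	"crypto/tls"
	"fmt"
	"os"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook address taken from the log records
	// Skip verification on purpose: we want to inspect the expired cert,
	// not reject the handshake the way the kubelet's client did.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Printf("dial %s: %v\n", addr, err)
		os.Exit(1)
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("subject:   %s\n", leaf.Subject)
	fmt.Printf("notBefore: %s\n", leaf.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", leaf.NotAfter.Format(time.RFC3339))
	if now.After(leaf.NotAfter) {
		fmt.Println("certificate has expired - matches the x509 error in the log")
	}
}
```

The probe deliberately skips verification so it can read the expired leaf; the kubelet's client verifies and therefore aborts the handshake, so every status update keeps failing until the certificate is rotated.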
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.622621 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.642295 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.660989 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.681989 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.697275 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.708817 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.708876 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.708885 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.708900 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.708920 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:42Z","lastTransitionTime":"2025-10-13T06:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.717417 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.732636 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.743766 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.756022 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.772950 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.786103 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.798944 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.811635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.811667 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.811680 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.811695 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.811705 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:42Z","lastTransitionTime":"2025-10-13T06:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.820095 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 
06:47:42.842573 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.859057 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:42Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.914903 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.914941 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.914953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.914968 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:42 crc kubenswrapper[4664]: I1013 06:47:42.914979 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:42Z","lastTransitionTime":"2025-10-13T06:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.017600 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.017724 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.017746 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.017759 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.017768 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.057024 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.057210 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.070212 4664 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.084187 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.105478 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea9281
20b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.119966 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.120023 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.120039 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.120064 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.120083 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.126713 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.156408 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.169923 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.180873 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.192742 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.207039 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.221564 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a
2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.222134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.222166 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.222177 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.222196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.222208 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.230666 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.240423 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.256000 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.273671 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.286832 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.301004 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.311896 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.324737 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.324784 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.324823 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.324840 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.324854 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.426835 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.426917 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.426937 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.426966 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.426984 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.530683 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.530751 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.530760 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.530779 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.530790 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.574329 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/0.log" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.574431 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerStarted","Data":"2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.589970 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"
imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.604411 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.618182 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.630112 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.633340 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.633419 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.633441 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.633473 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.633497 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.644882 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.668113 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status:
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.689123 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.703841 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.713292 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.724712 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.735880 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.735934 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.735947 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.735967 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.735981 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.740472 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.753449 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.765393 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.779050 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.792198 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.803311 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.813506 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.825430 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.836466 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:43Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.838838 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.838913 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.838952 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.838969 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.838981 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.971849 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.971894 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.971906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.971923 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:43 crc kubenswrapper[4664]: I1013 06:47:43.971934 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:43Z","lastTransitionTime":"2025-10-13T06:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.046596 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:44 crc kubenswrapper[4664]: E1013 06:47:44.046733 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.046837 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:44 crc kubenswrapper[4664]: E1013 06:47:44.046893 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.047627 4664 scope.go:117] "RemoveContainer" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.048029 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:44 crc kubenswrapper[4664]: E1013 06:47:44.048096 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.048140 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:44 crc kubenswrapper[4664]: E1013 06:47:44.048187 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.082203 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.082260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.082291 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.082309 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.085845 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.188486 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.188540 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.188551 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.188566 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.188575 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.291305 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.291356 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.291369 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.291395 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.291414 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.393534 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.393568 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.393591 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.393609 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.393620 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.496117 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.496146 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.496154 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.496166 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.496174 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.578524 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/2.log"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.580813 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.581699 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.596877 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.598256 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.598294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.598305 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.598321 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.598332 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.607886 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.623166 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.637580 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.653913 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.671016 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.690825 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.700660 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.700711 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.700727 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.700747 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.700764 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.716538 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.736745 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.749440 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a
7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.761210 4664 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.774886 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.786676 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.799576 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.803424 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.803461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:44 crc 
kubenswrapper[4664]: I1013 06:47:44.803471 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.803491 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.803503 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.811347 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.826141 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.840051 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.852903 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.865136 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:44Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.905825 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.905882 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.905892 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.905912 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:44 crc kubenswrapper[4664]: I1013 06:47:44.905926 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:44Z","lastTransitionTime":"2025-10-13T06:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.010826 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.010901 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.010914 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.010934 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.010950 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.113874 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.114116 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.114211 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.114276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.114335 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.216688 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.216731 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.216740 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.216756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.216768 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.319238 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.319505 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.319588 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.319652 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.319717 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.422145 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.422196 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.422212 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.422232 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.422245 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.525221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.525271 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.525280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.525296 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.525305 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.586731 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/3.log" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.587518 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/2.log" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.590828 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" exitCode=1 Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.590870 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.590911 4664 scope.go:117] "RemoveContainer" containerID="54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.591599 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" Oct 13 06:47:45 crc kubenswrapper[4664]: E1013 06:47:45.591768 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.620945 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.627808 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.627855 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.627866 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.627884 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.627899 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.632638 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.648660 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.662262 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.676358 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.691682 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.707176 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.732484 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.732527 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.732536 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.732554 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.732564 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.736446 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c
13212e1a56fffb08445d8da0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54a7d4b3133a752fbfe0c1b599f9186b71ea928120b595858dfcc8c43d57dfa0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:18Z\\\",\\\"message\\\":\\\"g metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1013 06:47:18.898010 6197 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1013 06:47:18.898208 6197 obj_retry.go:409] Going to retry *v1.Pod resource setup for 13 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-machine-config-operator/machine-config-daemon-hkzpl openshift-multus/multus-bg4kt openshift-ovn-kubernetes/ovnkube-node-mjr5r openshift-kube-controller-manager/kube-controller-manager-crc openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-8fhpj openshift-multus/network-metrics-daemon-9mgbt]\\\\nF1013 06:47:18.898227 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:44Z\\\",\\\"message\\\":\\\"alse}}\\\\nI1013 06:47:44.931375 6525 ovnkube_controller.go:900] Cache entry expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" but failed to find it\\\\nI1013 06:47:44.931387 6525 ovnkube_controller.go:804] Add Logical Switch Port event expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" in cache\\\\nI1013 06:47:44.931386 6525 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931398 6525 services_controller.go:445] Built service openshift-kube-apiserver/apiserver LB template configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931357 6525 default_network_controller.go:776] Recording success event on pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1013 06:47:44.931414 6525 lb_config.go:1031] Cluster endpoints for openshift-ingress-operator/metrics for network=default are: map[]\\\\nI1013 06:47:44.931435 6525 services_controller.go:356] Processing sync for service openshift-oauth-apiserver/api for network=default\\\\nI1013 06:47:44.931373 6525 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:44.931518 6525 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\
\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.761232 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555
971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.781546 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a
7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.800589 4664 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c188772
0565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.814120 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b8
81c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.830191 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.834700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.834747 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.834762 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.834779 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.834809 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.845062 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.857789 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.870195 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.885535 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.898184 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.908231 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:45Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.937661 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.937691 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.937700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.937715 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:45 crc kubenswrapper[4664]: I1013 06:47:45.937725 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:45Z","lastTransitionTime":"2025-10-13T06:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.039690 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.039739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.039751 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.039786 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.039817 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.046068 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.046109 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.046119 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:46 crc kubenswrapper[4664]: E1013 06:47:46.046175 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:46 crc kubenswrapper[4664]: E1013 06:47:46.046278 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:46 crc kubenswrapper[4664]: E1013 06:47:46.046412 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.046427 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:46 crc kubenswrapper[4664]: E1013 06:47:46.046684 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.142469 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.142515 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.142526 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.142544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.142555 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.246840 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.246876 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.246886 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.246903 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.246915 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.349917 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.349992 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.350004 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.350037 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.350060 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.454485 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.454577 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.454601 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.454632 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.454666 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.557339 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.557387 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.557399 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.557416 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.557428 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.597178 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/3.log" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.602159 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" Oct 13 06:47:46 crc kubenswrapper[4664]: E1013 06:47:46.602322 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.631446 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.655673 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c
13212e1a56fffb08445d8da0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:44Z\\\",\\\"message\\\":\\\"alse}}\\\\nI1013 06:47:44.931375 6525 ovnkube_controller.go:900] Cache entry expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" but failed to find it\\\\nI1013 06:47:44.931387 6525 ovnkube_controller.go:804] Add Logical Switch Port event expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" in cache\\\\nI1013 06:47:44.931386 6525 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931398 6525 services_controller.go:445] Built service openshift-kube-apiserver/apiserver LB template configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931357 6525 default_network_controller.go:776] Recording success event on pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1013 06:47:44.931414 6525 lb_config.go:1031] Cluster endpoints for openshift-ingress-operator/metrics for network=default are: map[]\\\\nI1013 06:47:44.931435 6525 services_controller.go:356] Processing sync for service openshift-oauth-apiserver/api for network=default\\\\nI1013 06:47:44.931373 6525 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:44.931518 6525 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.661815 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.661850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.661863 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.661884 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.661898 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.691377 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.713689 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.734059 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.752270 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.764574 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.764635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.764648 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.764670 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 
06:47:46.764682 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.766096 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.783360 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.799177 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.820905 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.836375 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.851087 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 
06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.865961 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.867946 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.868102 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.868244 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.868391 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.868505 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.884022 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.904292 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.923111 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.939457 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.951078 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.967122 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:46Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.971439 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.971477 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.971491 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.971513 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:46 crc kubenswrapper[4664]: I1013 06:47:46.971528 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:46Z","lastTransitionTime":"2025-10-13T06:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.075126 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.075465 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.075648 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.075906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.076108 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.179040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.179106 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.179122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.179143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.179157 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.282308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.282696 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.282882 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.283026 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.283188 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.385477 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.385706 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.385715 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.385728 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.385737 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.488529 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.488592 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.488610 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.488635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.488652 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.592967 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.593024 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.593045 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.593075 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.593096 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.696480 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.696546 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.696563 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.696592 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.696613 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.801784 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.801918 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.801938 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.801969 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.801991 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.906124 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.906208 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.906230 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.906259 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:47 crc kubenswrapper[4664]: I1013 06:47:47.906280 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:47Z","lastTransitionTime":"2025-10-13T06:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.011832 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.011912 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.011936 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.011972 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.011998 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.046288 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:48 crc kubenswrapper[4664]: E1013 06:47:48.046427 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.046438 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:48 crc kubenswrapper[4664]: E1013 06:47:48.046721 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.047079 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.047112 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:48 crc kubenswrapper[4664]: E1013 06:47:48.047202 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:48 crc kubenswrapper[4664]: E1013 06:47:48.047389 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.114906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.115002 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.115025 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.115058 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.115081 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.217749 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.217818 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.217831 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.217847 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.217859 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.320742 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.320827 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.320843 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.320868 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.320883 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.423544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.423605 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.423624 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.423642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.423655 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.528209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.528395 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.528441 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.528492 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.528507 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.631668 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.631697 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.631705 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.631717 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.631725 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.734388 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.734435 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.734444 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.734462 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.734472 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.839823 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.839910 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.839929 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.839957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.839976 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.944081 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.944135 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.944144 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.944161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:48 crc kubenswrapper[4664]: I1013 06:47:48.944177 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:48Z","lastTransitionTime":"2025-10-13T06:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.047204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.047261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.047270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.047283 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.047295 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.149665 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.149733 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.149745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.149786 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.150360 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.254417 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.254466 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.254477 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.254494 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.254507 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.357241 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.357299 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.357320 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.357347 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.357367 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.461226 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.461347 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.461366 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.461391 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.461408 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.564970 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.565059 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.565082 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.565117 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.565138 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.668662 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.668726 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.668743 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.668762 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.668775 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.772778 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.772887 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.772906 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.772936 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.772957 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.876508 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.876568 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.876586 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.876611 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.876632 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.980085 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.980174 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.980200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.980258 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:49 crc kubenswrapper[4664]: I1013 06:47:49.980284 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:49Z","lastTransitionTime":"2025-10-13T06:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.046167 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.046238 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.046389 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.046653 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.046630 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.046924 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.047035 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.047117 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.083693 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.083742 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.083757 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.083779 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.083828 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.186842 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.186899 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.186923 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.186952 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.186974 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.289869 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.289993 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.290014 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.290042 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.290062 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.393552 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.393620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.393641 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.393672 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.393692 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.483015 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.483107 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.483142 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.483162 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.483173 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.502968 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:50Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.507642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.507694 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.507705 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.507724 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.507738 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.524152 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:50Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.530671 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.530720 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.530732 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.530752 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.530766 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.553397 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:50Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.564111 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.564160 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.564179 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.564199 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.564212 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.584150 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:50Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.590858 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.590895 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.590905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.590925 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.590937 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.608408 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:50Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:50 crc kubenswrapper[4664]: E1013 06:47:50.608558 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.611002 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.611040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.611055 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.611076 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.611091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.714240 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.714275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.714284 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.714317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.714326 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.818032 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.818102 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.818119 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.818143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.818163 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.921949 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.922014 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.922026 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.922077 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:50 crc kubenswrapper[4664]: I1013 06:47:50.922091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:50Z","lastTransitionTime":"2025-10-13T06:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.025860 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.025957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.025982 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.026047 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.026067 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.129981 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.130083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.130111 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.130135 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.130152 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.233384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.233482 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.233583 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.233685 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.233714 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.337443 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.337553 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.337569 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.337589 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.337602 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.441316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.441416 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.441435 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.441501 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.441520 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.544485 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.544525 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.544536 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.544552 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.544564 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.648224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.648313 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.648335 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.648367 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.648388 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.752012 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.752084 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.752104 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.752127 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.752152 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.855596 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.855670 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.855688 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.855713 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.855730 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.957680 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.957726 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.957739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.957757 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:51 crc kubenswrapper[4664]: I1013 06:47:51.957770 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:51Z","lastTransitionTime":"2025-10-13T06:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.046103 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.046200 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.046251 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:52 crc kubenswrapper[4664]: E1013 06:47:52.046283 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.046221 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:52 crc kubenswrapper[4664]: E1013 06:47:52.046346 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:52 crc kubenswrapper[4664]: E1013 06:47:52.046411 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:52 crc kubenswrapper[4664]: E1013 06:47:52.046544 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.061180 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.061244 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.061261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.061284 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.061300 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.164638 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.164744 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.164757 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.164776 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.164788 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.267973 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.268038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.268056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.268083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.268100 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.371257 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.371301 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.371326 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.371341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.371352 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.475487 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.475565 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.475585 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.476045 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.476101 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.580070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.580117 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.580134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.580153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.580166 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.682461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.682510 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.682522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.682540 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.682551 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.785374 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.785892 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.785913 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.785937 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.785959 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.889526 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.889614 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.889636 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.889665 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.889684 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.993318 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.993430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.993450 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.993507 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:52 crc kubenswrapper[4664]: I1013 06:47:52.993526 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:52Z","lastTransitionTime":"2025-10-13T06:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.065763 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.083765 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.096200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.096275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.096291 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.096310 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.096346 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:53Z","lastTransitionTime":"2025-10-13T06:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.102237 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.123734 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.154524 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c
13212e1a56fffb08445d8da0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:44Z\\\",\\\"message\\\":\\\"alse}}\\\\nI1013 06:47:44.931375 6525 ovnkube_controller.go:900] Cache entry expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" but failed to find it\\\\nI1013 06:47:44.931387 6525 ovnkube_controller.go:804] Add Logical Switch Port event expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" in cache\\\\nI1013 06:47:44.931386 6525 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931398 6525 services_controller.go:445] Built service openshift-kube-apiserver/apiserver LB template configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931357 6525 default_network_controller.go:776] Recording success event on pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1013 06:47:44.931414 6525 lb_config.go:1031] Cluster endpoints for openshift-ingress-operator/metrics for network=default are: map[]\\\\nI1013 06:47:44.931435 6525 services_controller.go:356] Processing sync for service openshift-oauth-apiserver/api for network=default\\\\nI1013 06:47:44.931373 6525 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:44.931518 6525 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.180747 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf2
60192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.199988 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.200048 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:53 crc 
kubenswrapper[4664]: I1013 06:47:53.200135 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.200153 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.200207 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:53Z","lastTransitionTime":"2025-10-13T06:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.206147 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.230391 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.249348 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d
6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.273110 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.291445 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.302973 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.303015 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:53 crc 
kubenswrapper[4664]: I1013 06:47:53.303026 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.303040 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.303050 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:53Z","lastTransitionTime":"2025-10-13T06:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.304985 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 
13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.324611 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.340675 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.358836 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.376506 4664 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.390613 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.403630 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.405625 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.405701 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.405719 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.405745 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.406070 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:53Z","lastTransitionTime":"2025-10-13T06:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:53 crc kubenswrapper[4664]: I1013 06:47:53.417442 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:47:53Z is after 2025-08-24T17:21:41Z"
[log trimmed: the five-record node-status sequence above ("Recording event message for node" for NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID and NodeNotReady, followed by the setters.go:603 "Node became not ready" record) repeats with only the timestamps advancing, six more times between 06:47:53.509908 and 06:47:54.033216]
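The webhook rejections above all reduce to one comparison made during the TLS handshake, before any HTTP request is sent: the current time against the certificate's NotAfter. A minimal, self-contained Go sketch of that check follows; the certificate path is a hypothetical stand-in, not a file named in this log.

    // certcheck.go: a sketch of the validity-window test behind the
    // "x509: certificate has expired or is not yet valid" errors above.
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; substitute the webhook's actual serving cert.
        pemBytes, err := os.ReadFile("/path/to/webhook-serving-cert.pem")
        if err != nil {
            log.Fatal(err)
        }
        block, _ := pem.Decode(pemBytes)
        if block == nil {
            log.Fatal("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        now := time.Now().UTC()
        // The same window test the TLS handshake applies to the peer cert.
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("certificate has expired: current time %s is after %s\n",
                now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
                now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
        default:
            fmt.Println("certificate is within its validity window")
        }
    }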
Oct 13 06:47:54 crc kubenswrapper[4664]: I1013 06:47:54.046310 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:47:54 crc kubenswrapper[4664]: I1013 06:47:54.046366 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:47:54 crc kubenswrapper[4664]: I1013 06:47:54.046360 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:47:54 crc kubenswrapper[4664]: I1013 06:47:54.046448 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:47:54 crc kubenswrapper[4664]: E1013 06:47:54.046529 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:47:54 crc kubenswrapper[4664]: E1013 06:47:54.046682 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:47:54 crc kubenswrapper[4664]: E1013 06:47:54.046713 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:47:54 crc kubenswrapper[4664]: E1013 06:47:54.046776 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
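The NetworkReady=false message repeated through this log has a single trigger: the kubelet asks the CNI layer for a network configuration and finds none on disk. A simplified Go sketch of that directory probe follows; it approximates, but is not, the actual libcni lookup, and the extension list is an assumption based on common CNI config conventions.

    // cnicheck.go: a sketch of the probe behind "no CNI configuration
    // file in /etc/kubernetes/cni/net.d/" in the records above.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
        entries, err := os.ReadDir(confDir)
        if err != nil && !os.IsNotExist(err) {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        var confs []string
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // assumed extension set
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            // A missing dir or an empty dir both land here, which is why
            // the node stays NotReady until the network provider writes
            // its config.
            fmt.Println("no CNI configuration file in", confDir,
                "- has your network provider started?")
            return
        }
        fmt.Println("found CNI configs:", confs)
    }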
[log trimmed: the node-status sequence keeps repeating roughly every 100 ms from 06:47:54.136315 until the capture breaks off mid-record at 06:47:57.764821; the only other entries in the trimmed span are a second round of the four "No sandbox for pod can be found" / "Error syncing pod, skipping" pairs above, logged for the same four pods at 06:47:56.046]
Has your network provider started?"} Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.868068 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.868134 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.868152 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.868176 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.868194 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:57Z","lastTransitionTime":"2025-10-13T06:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.971998 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.972056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.972079 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.972110 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.972126 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:57Z","lastTransitionTime":"2025-10-13T06:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:47:57 crc kubenswrapper[4664]: I1013 06:47:57.985449 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:47:57 crc kubenswrapper[4664]: E1013 06:47:57.985664 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.985647183 +0000 UTC m=+149.673092375 (durationBeforeRetry 1m4s). 
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.046099 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.046157 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.046234 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.046281 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.046287 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.046395 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.046545 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.047110 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.048198 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.048515 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.074271 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.074297 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.074305 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.074316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.074325 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:58Z","lastTransitionTime":"2025-10-13T06:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.086068 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.086108 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.086133 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.086149 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086234 4664 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086269 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.086258114 +0000 UTC m=+149.773703306 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086393 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086412 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086422 4664 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086443 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.086436909 +0000 UTC m=+149.773882101 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086546 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086561 4664 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086568 4664 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086588 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.086581653 +0000 UTC m=+149.774026845 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
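The kube-api-access-* volumes failing above are projected service-account volumes: a bundle of a bound token, the kube-root-ca.crt configMap (plus openshift-service-ca.crt on OpenShift), and the pod namespace from the downward API. SetUp cannot proceed until those configMaps appear in the kubelet's object cache, which is what the "not registered" errors mean. Below is a hedged Go sketch of the equivalent volume definition using the upstream k8s.io/api/core/v1 types; field values such as the 3607s token expiry are the usual API-server defaults, assumed here rather than read from this cluster.

    // Illustrative reconstruction of a kube-api-access-* projected volume,
    // built from the sources named in the errors above (an assumption, not
    // the pod spec from this cluster).
    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func kubeAPIAccessVolume(name string) corev1.Volume {
    	expiry := int64(3607) // assumed default bound-token lifetime
    	return corev1.Volume{
    		Name: name, // e.g. "kube-api-access-cqllr"
    		VolumeSource: corev1.VolumeSource{
    			Projected: &corev1.ProjectedVolumeSource{
    				Sources: []corev1.VolumeProjection{
    					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
    						ExpirationSeconds: &expiry,
    						Path:              "token",
    					}},
    					// This is the source that fails above: the configMap must
    					// be in the kubelet's cache before SetUp can succeed.
    					// OpenShift projects openshift-service-ca.crt the same way.
    					{ConfigMap: &corev1.ConfigMapProjection{
    						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
    						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
    					}},
    					{DownwardAPI: &corev1.DownwardAPIProjection{
    						Items: []corev1.DownwardAPIVolumeFile{{
    							Path:     "namespace",
    							FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"},
    						}},
    					}},
    				},
    			},
    		},
    	}
    }

    func main() {
    	v := kubeAPIAccessVolume("kube-api-access-cqllr")
    	fmt.Println(v.Name, "with", len(v.Projected.Sources), "projected sources")
    }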
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086676 4664 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: E1013 06:47:58.086696 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.086690816 +0000 UTC m=+149.774136008 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.176409 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.176435 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.176443 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.176455 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:47:58 crc kubenswrapper[4664]: I1013 06:47:58.176464 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:47:58Z","lastTransitionTime":"2025-10-13T06:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.037109 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.037164 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.037182 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.037207 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.037225 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.046914 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.047022 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.047068 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.047301 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.047310 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.047411 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.047558 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.047689 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.140577 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.140634 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.140645 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.140664 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.140701 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.243636 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.243682 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.243700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.243727 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.243747 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.347343 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.347415 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.347435 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.347466 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.347485 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.451713 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.451855 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.451882 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.451915 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.451937 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.554563 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.554630 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.554653 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.554676 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.554692 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.657411 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.657462 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.657474 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.657495 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.657511 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.686902 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.686965 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.686979 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.687000 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.687303 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.706457 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.712497 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.712578 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.712606 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.712635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.712654 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.733681 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.740300 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.740378 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.740400 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.740430 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.740451 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.762432 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.768279 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.768328 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.768341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.768360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.768374 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.791926 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.796845 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.796884 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.796896 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.796914 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.796927 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.815828 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:00Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:00 crc kubenswrapper[4664]: E1013 06:48:00.816055 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.819287 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.819344 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.819362 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.819392 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.819413 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.922787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.922880 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.922898 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.922924 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:00 crc kubenswrapper[4664]: I1013 06:48:00.922942 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:00Z","lastTransitionTime":"2025-10-13T06:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.026576 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.026620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.026637 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.026660 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.026678 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.130758 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.130861 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.130878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.130905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.130924 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.234854 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.234943 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.234971 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.235005 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.235030 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.339072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.339589 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.339749 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.339936 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.340073 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.444612 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.444714 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.444742 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.444777 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.444838 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.549190 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.549247 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.549270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.549297 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.549349 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.653151 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.653220 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.653237 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.653261 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.653315 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.758287 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.758769 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.759224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.759384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.759541 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.863620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.863665 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.863678 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.863693 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.863703 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.966405 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.966471 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.966485 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.966501 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:01 crc kubenswrapper[4664]: I1013 06:48:01.966514 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:01Z","lastTransitionTime":"2025-10-13T06:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.046790 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.046872 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.046903 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.047101 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:48:02 crc kubenswrapper[4664]: E1013 06:48:02.047311 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:48:02 crc kubenswrapper[4664]: E1013 06:48:02.047406 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:48:02 crc kubenswrapper[4664]: E1013 06:48:02.047501 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:48:02 crc kubenswrapper[4664]: E1013 06:48:02.047665 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.069990 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.070052 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.070070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.070094 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.070112 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.174463 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.174525 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.174545 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.174572 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.174592 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.278242 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.278288 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.278311 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.278334 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.278350 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.381069 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.381161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.381181 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.381294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.381358 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.486230 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.486292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.486308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.486335 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.486352 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.589497 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.589577 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.589600 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.589636 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.589661 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.692984 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.693057 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.693081 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.693114 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.693139 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.797459 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.797573 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.797597 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.797623 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.797643 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.901526 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.901596 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.901615 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.901642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:02 crc kubenswrapper[4664]: I1013 06:48:02.901661 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:02Z","lastTransitionTime":"2025-10-13T06:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.006940 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.007029 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.007053 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.007087 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.007117 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.084626 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74eb7029-982d-4294-bed0-63ffe7281479\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:44Z\\\",\\\"message\\\":\\\"alse}}\\\\nI1013 06:47:44.931375 6525 ovnkube_controller.go:900] Cache entry expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" but failed to find it\\\\nI1013 06:47:44.931387 6525 ovnkube_controller.go:804] Add Logical Switch Port event expected pod with UID \\\\\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\\\\\" in cache\\\\nI1013 06:47:44.931386 6525 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931398 6525 services_controller.go:445] Built service openshift-kube-apiserver/apiserver LB template configs for network=default: []services.lbConfig(nil)\\\\nI1013 06:47:44.931357 6525 default_network_controller.go:776] Recording success event on pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1013 06:47:44.931414 6525 lb_config.go:1031] Cluster endpoints for openshift-ingress-operator/metrics for network=default are: map[]\\\\nI1013 06:47:44.931435 6525 services_controller.go:356] Processing sync for service openshift-oauth-apiserver/api for network=default\\\\nI1013 06:47:44.931373 6525 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1013 06:47:44.931518 6525 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-knvjm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mjr5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.111436 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.111476 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.111487 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.111506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.111517 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.113126 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88bfde0a-8ca2-434d-879a-e403b3f4cd54\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d05af5384c7e939aa135454554c6a341947b9684ed8c01589ad33efbaad8ab7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02d4e5e968b905c2b36cbf260192e582d8259a0eb64f66ce194922ca66dd6be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7424d9f19242b4b9785d8ebbcf987b258a7d8e2f2736f52609d48861f819e659\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a954b60acd7a87bc352b9f02509464a3bc555971809d26eafb2e7675b795e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f953eb06b67ec17af90d3b8767497166bb71aad689541f37078653a6ae6af64e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5df975b2e232f6d80c9f24e75d72bc765cd8f2083322e24542e39aa280b6b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc0eb7fcb90360038c7f127f71aeab37f54b53b22c90be72c3c92fb959631633\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38ae709a9279204652907f7e5bb88e408f85e66799f00c16f3187af88a39ae5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.129450 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a76be960-f5c4-448e-b007-c8445d4a47ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1899101e9abcffbdbac4444a764e491e2510356fe227999c7822e3b27df14cb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81e0662a7e46a9739897f772ead26a9a7acc9b9b4b85aa6d4f53fb49f69a70d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0f15d9a77def768a40485a26f8016c1913959e468b7fa09c19afa6c232c537a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f72ddec79febca915791160ebf089b9d5a01fdcf7a25df5f2466bc410b0c905a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7dfb03762ec8f131521e8d4bbc6205ea7c78a01649ccc93700357b2189a92402\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-13T06:46:52Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1013 06:46:46.958932 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1013 06:46:46.962309 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4074504243/tls.crt::/tmp/serving-cert-4074504243/tls.key\\\\\\\"\\\\nI1013 06:46:52.574272 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1013 06:46:52.580102 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1013 06:46:52.580288 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1013 06:46:52.580319 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1013 06:46:52.580341 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1013 06:46:52.594340 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1013 06:46:52.594376 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594386 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1013 06:46:52.594396 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1013 06:46:52.594404 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1013 06:46:52.594409 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1013 06:46:52.594415 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1013 06:46:52.594674 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1013 06:46:52.595591 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab75d426858ccaa6f73546e94c83b85a25bd3ab486ce2e7e4a9580617cd1f205\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720e2ad06cce549c09d800d5454c470b0de229bf3e6bccaca3aad095f3c54c99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.143900 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55b19636-37fa-4620-83f5-92a2ef43b73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69e5511b3a5c77e8faf1422f00748c6265e28922a8fb04f649c3d540266e25ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfe7fc88d17459b8cfa36d5945e619ebf27cfb49e95b8312d27755ada84ffc71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3bb727847bee627f12c1c355b503cd179bfba3d5c8fc0eccc583ed876cb717\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeacf68ec038d40d0c1887720565132eee580b231cfc5c7b5f4a00b52624439b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.159033 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7ccf661-feaf-4aa3-a45c-067880d47fd6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a912f4ce8cd0e6663149b0a31f741548222d5844a8082bca3ea12bc528149471\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://962d463de6011e928e1a380cd6c313228a85e71af9c82ec47bf98f7264f1d3ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4556aeb5ca7ebe6dec25827d566876bc24e9b04be87a8dbe5c87def7d7c7082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.176874 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.194097 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e421cf8b4df5d060efeb7e3a4666d3bb19eced3654d5ab80d9d5011ea1b976ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.207998 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.214073 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.214125 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.214140 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.214161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.214175 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.224321 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bg4kt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f22066f-5783-48bc-85f8-0fbb2eed7e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-13T06:47:41Z\\\",\\\"message\\\":\\\"2025-10-13T06:46:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680\\\\n2025-10-13T06:46:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_93e2a015-905a-4215-9b45-6a3fb16e6680 to /host/opt/cni/bin/\\\\n2025-10-13T06:46:56Z [verbose] multus-daemon started\\\\n2025-10-13T06:46:56Z [verbose] Readiness Indicator file check\\\\n2025-10-13T06:47:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6rv6j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bg4kt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.239698 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1406c08-b6a4-404d-9b44-05ee214a555d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://533540ae2e7ffb8744aade18be4fac8048b799707e684b570b35f7e8fa4a1ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fae95d61b764f58607bd961ef5f60612ebfaea3b1506aa3d184328dd363e20e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9cdd848c6f1e14e0d355735fbec9037873986904f34a2022be8707b2c6a23788\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0aa3c0bcf65151d39862abab16e85180c610db43cb2c019245bf8c7f5ae1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e3a2ccedd88af9eb8fde976569f8d102173905eb145a7e759de69df8583404e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dff41de9da33b77f6cbc371ad935019b160b460969dab0d6f4296853643efc08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8c4fffc62a31984daebbf1a90b0866f1ee09c80a5697c79186c19984f688e39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wnscf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xh2nz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.253150 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8fhpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"885fdacf-66f6-46d0-bc3f-f23f8edd8fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://702ba2d9c9dad3ec98e584344d0d32a7c0a06e752d3a8814635c55253fe93825\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-znc5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8fhpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.266535 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d33b17b0-8a4f-493a-a316-7d32a7f1ae45\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cae16c7e56bbfa927b1af18c8449792b10598e275da8c83816325165cbf177d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://996a6dcfc059d6d41dd2c119cb40857c9d47e9ab2cfd9ce0617bed42285eecd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wcn42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r8wrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 
06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.281390 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4274b63a176bf8fc92daf352ef08b1ee951ca0945d905023a0368dd5c49e5d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.297640 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5368cdd7b0f9a64176d34d3b1df30501427c9c5809475e8326f82de4b2a2528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24e826f62fa987edecab982436bec8f47b1b503b5a30095514e3f63e9ffbe29b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.313550 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35504ef1-729c-4404-bd49-0d82bf23ccbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29e97f57a17d621e2f3c443f90a8f695fc17bb279d40ae5376ab24b3bde1b4c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vhrws\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hkzpl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.317416 4664 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.317453 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.317467 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.317484 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.317498 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.330223 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.344500 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-96lj2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62a87cfd-bdfc-4cf3-a081-b204fbe37d5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://805217efe4fa36eb86197b719df4f87302930d3638d6199af6195fa885e0dea5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85x8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-96lj2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.361212 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"26d57666-7cf1-4038-b8c6-0c2f53a8b123\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:46:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://514f7f9cd13c473b6ec5f2076c2ac0391568f1d0a41490f3c3ab1b56f21f75d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-13T06:46:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f65689552c5a10bb46d377b9fd754faf397926a3ff47c74cf686d15126d1ac2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-13T06:46:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-13T06:46:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:46:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 
06:48:03.380492 4664 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eba49cc7-48bb-4372-8eb3-c88513c591b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-13T06:47:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v2nb7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-13T06:47:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9mgbt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:03Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.420911 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.420967 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.420990 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.421017 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.421039 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.523467 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.523513 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.523522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.523538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.523548 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.626270 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.626637 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.626836 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.626952 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.627121 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.730820 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.730879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.730893 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.730908 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.731189 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.833854 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.833896 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.833910 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.833927 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.833940 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.936750 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.936856 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.936879 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.936904 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:03 crc kubenswrapper[4664]: I1013 06:48:03.936920 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:03Z","lastTransitionTime":"2025-10-13T06:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.040487 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.040553 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.040565 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.040586 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.040971 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.046775 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.046854 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.046881 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.046817 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:04 crc kubenswrapper[4664]: E1013 06:48:04.047013 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:04 crc kubenswrapper[4664]: E1013 06:48:04.047151 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:04 crc kubenswrapper[4664]: E1013 06:48:04.047289 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:04 crc kubenswrapper[4664]: E1013 06:48:04.047456 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.143995 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.144070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.144129 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.144172 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.144199 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.248586 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.248638 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.248656 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.248682 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.248700 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.352079 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.352145 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.352163 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.352190 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.352209 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.456005 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.456046 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.456055 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.456070 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.456083 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.559483 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.559540 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.559551 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.559574 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.559586 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.662996 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.663047 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.663056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.663072 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.663082 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.766131 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.766213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.766233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.766262 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.766284 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.869459 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.869531 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.869550 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.869580 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.869604 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.972766 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.972840 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.972853 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.972877 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:04 crc kubenswrapper[4664]: I1013 06:48:04.972891 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:04Z","lastTransitionTime":"2025-10-13T06:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.076108 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.076155 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.076165 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.076181 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.076192 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.179326 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.179400 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.179422 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.179454 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.179475 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.282238 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.282289 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.282299 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.282317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.282327 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.385171 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.385212 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.385224 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.385239 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.385252 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.487543 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.487587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.487603 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.487620 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.487630 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.591262 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.591294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.591304 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.591320 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.591329 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.693920 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.693971 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.693983 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.693999 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.694011 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.797205 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.797260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.797276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.797297 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.797313 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.900164 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.900200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.900217 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.900233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:05 crc kubenswrapper[4664]: I1013 06:48:05.900243 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:05Z","lastTransitionTime":"2025-10-13T06:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.002696 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.002725 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.002736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.002750 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.002760 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.046548 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:06 crc kubenswrapper[4664]: E1013 06:48:06.046669 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.046889 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:06 crc kubenswrapper[4664]: E1013 06:48:06.046941 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.046957 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.047001 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:06 crc kubenswrapper[4664]: E1013 06:48:06.047069 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:06 crc kubenswrapper[4664]: E1013 06:48:06.047169 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.105429 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.105461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.105469 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.105482 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.105500 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.208120 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.208167 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.208182 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.208202 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.208217 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.310768 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.310850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.310865 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.310883 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.310899 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.413557 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.413611 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.413635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.413656 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.413673 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.523277 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.523338 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.523354 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.523377 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.523394 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.625459 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.625510 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.625521 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.625538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.625550 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.727959 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.728003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.728016 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.728030 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.728041 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.831905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.831935 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.831966 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.831983 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.831994 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.934789 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.934843 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.934854 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.934868 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:06 crc kubenswrapper[4664]: I1013 06:48:06.934877 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:06Z","lastTransitionTime":"2025-10-13T06:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.037503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.037535 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.037544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.037557 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.037568 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.140258 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.140299 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.140308 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.140325 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.140334 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.242131 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.242170 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.242178 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.242194 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.242204 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.344328 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.344376 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.344384 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.344399 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.344409 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.446646 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.446681 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.446689 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.446702 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.446712 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.548928 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.548970 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.548980 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.548997 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.549008 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.651083 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.651116 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.651126 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.651139 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.651148 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.753703 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.753739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.753746 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.753776 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.753787 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.856604 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.856658 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.856677 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.856700 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.856718 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.960213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.960263 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.960274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.960291 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:07 crc kubenswrapper[4664]: I1013 06:48:07.960302 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:07Z","lastTransitionTime":"2025-10-13T06:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.046228 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:08 crc kubenswrapper[4664]: E1013 06:48:08.046471 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.046577 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.046667 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:08 crc kubenswrapper[4664]: E1013 06:48:08.046731 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:08 crc kubenswrapper[4664]: E1013 06:48:08.046867 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.046931 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:08 crc kubenswrapper[4664]: E1013 06:48:08.046986 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.062043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.062076 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.062085 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.062098 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.062109 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.164041 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.164069 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.164076 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.164091 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.164100 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.266612 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.266647 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.266655 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.266668 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.266676 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.368481 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.368514 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.368522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.368537 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.368547 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.470462 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.470497 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.470506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.470517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.470527 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.573902 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.573955 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.573968 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.573987 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.574016 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.676466 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.676559 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.676587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.676635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.676666 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.778654 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.778733 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.778756 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.778787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.778852 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.881588 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.881632 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.881642 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.881656 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.881667 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.984251 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.984294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.984302 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.984317 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:08 crc kubenswrapper[4664]: I1013 06:48:08.984326 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:08Z","lastTransitionTime":"2025-10-13T06:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.086131 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.086169 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.086178 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.086190 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.086201 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.188388 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.188424 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.188432 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.188444 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.188453 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.291500 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.291860 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.291975 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.292128 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.292262 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.395158 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.395188 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.395195 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.395209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.395218 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.497857 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.497896 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.497908 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.497941 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.497955 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.599844 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.599876 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.599887 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.599905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.599917 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.702088 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.702125 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.702136 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.702152 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.702166 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.804506 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.804539 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.804550 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.804570 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.804584 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.907402 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.907468 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.907485 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.907512 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:09 crc kubenswrapper[4664]: I1013 06:48:09.907528 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:09Z","lastTransitionTime":"2025-10-13T06:48:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.010318 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.010360 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.010377 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.010393 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.010402 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.046152 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.046237 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.046370 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:10 crc kubenswrapper[4664]: E1013 06:48:10.046360 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:10 crc kubenswrapper[4664]: E1013 06:48:10.046559 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:10 crc kubenswrapper[4664]: E1013 06:48:10.046584 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.046922 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:10 crc kubenswrapper[4664]: E1013 06:48:10.047038 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.047394 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" Oct 13 06:48:10 crc kubenswrapper[4664]: E1013 06:48:10.047574 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.113362 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.113415 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.113561 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.113587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.113624 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.216674 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.216722 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.216736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.216753 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.216765 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.319294 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.319565 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.319632 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.319732 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.319857 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.421673 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.421712 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.421720 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.421736 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.421747 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.523897 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.523944 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.523957 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.523974 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.523986 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.626446 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.626695 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.626762 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.626888 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.626993 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.730861 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.731340 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.731406 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.731468 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.731554 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.834290 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.834564 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.834624 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.834688 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.834745 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.938396 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.938878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.939048 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.939203 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:10 crc kubenswrapper[4664]: I1013 06:48:10.939335 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:10Z","lastTransitionTime":"2025-10-13T06:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.042865 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.042942 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.042960 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.042986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.043006 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.146258 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.146304 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.146315 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.146329 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.146342 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.216588 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.216712 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.216731 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.216789 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.216845 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.234056 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:11Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.239861 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.239900 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.239909 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.239924 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.239934 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.264883 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:11Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.270880 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.270958 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.270982 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.271012 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.271037 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.291232 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:11Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.297062 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.297111 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.297130 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.297157 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.297176 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.313745 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:11Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.319563 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.319627 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
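Each of the status-patch failures above has the same root cause: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, so every node status PATCH is rejected before it reaches the API object. A small Go sketch that pulls the certificate dates straight from that endpoint to confirm the expiry window; it assumes the port is reachable from the node, and verification is skipped deliberately so the expired chain can still be inspected.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Endpoint taken from the webhook error in the log above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect the expired cert instead of failing on it
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Print the validity window of every certificate the server presents.
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%v notBefore=%s notAfter=%s\n",
			cert.Subject, cert.NotBefore, cert.NotAfter)
	}
}

The same check is commonly done with openssl s_client against the endpoint; either way, the usual remedy is renewing the webhook's serving certificate, after which the patch loop below should succeed.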
event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.319644 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.319671 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.319689 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.341409 4664 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-13T06:48:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"91d5f35e-3847-46d0-ad62-97041d9b1127\\\",\\\"systemUUID\\\":\\\"56030f0e-24c6-4539-80c0-32dccf756401\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-13T06:48:11Z is after 2025-08-24T17:21:41Z" Oct 13 06:48:11 crc kubenswrapper[4664]: E1013 06:48:11.341590 4664 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.343937 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.343972 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.343986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.344003 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.344013 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.447704 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.447828 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.447868 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.448038 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.448091 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.551871 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.551922 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.551941 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.551965 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.551981 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.654928 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.655002 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.655027 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.655381 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.655652 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.761240 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.761333 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.761358 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.761395 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.761421 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.864867 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.864932 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.864950 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.864978 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.864998 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.968729 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.968790 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.968839 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.968865 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:11 crc kubenswrapper[4664]: I1013 06:48:11.968885 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:11Z","lastTransitionTime":"2025-10-13T06:48:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.046720 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.046903 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.046978 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.047006 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.047022 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.047138 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.047251 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.047393 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.071927 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.071986 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.072014 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.072043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.072067 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.176104 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.176186 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.176205 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.176232 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.176252 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.279962 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.280056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.280069 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.280084 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.280094 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.384427 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.384501 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.384520 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.384547 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.384566 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.450665 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.450940 4664 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:48:12 crc kubenswrapper[4664]: E1013 06:48:12.451056 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs podName:eba49cc7-48bb-4372-8eb3-c88513c591b9 nodeName:}" failed. No retries permitted until 2025-10-13 06:49:16.451026308 +0000 UTC m=+164.138471530 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs") pod "network-metrics-daemon-9mgbt" (UID: "eba49cc7-48bb-4372-8eb3-c88513c591b9") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.488484 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.488545 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.488566 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.488593 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.488611 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.592140 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.592204 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.592223 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.592254 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.592275 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.696474 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.696532 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.696548 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.696571 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.696591 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.799643 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.799687 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.799698 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.799712 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.799723 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.902965 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.903027 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.903043 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.903062 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:12 crc kubenswrapper[4664]: I1013 06:48:12.903075 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:12Z","lastTransitionTime":"2025-10-13T06:48:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.007302 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.007374 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.007390 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.007411 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.007427 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.113289 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.113394 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.113454 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.113483 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.113503 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.120993 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=80.120970854 podStartE2EDuration="1m20.120970854s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.090119169 +0000 UTC m=+100.777564401" watchObservedRunningTime="2025-10-13 06:48:13.120970854 +0000 UTC m=+100.808416076" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.145014 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=77.144988427 podStartE2EDuration="1m17.144988427s" podCreationTimestamp="2025-10-13 06:46:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.122932388 +0000 UTC m=+100.810377640" watchObservedRunningTime="2025-10-13 06:48:13.144988427 +0000 UTC m=+100.832433629" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.145428 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=48.145420839 podStartE2EDuration="48.145420839s" podCreationTimestamp="2025-10-13 06:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.144213735 +0000 UTC m=+100.831658967" watchObservedRunningTime="2025-10-13 06:48:13.145420839 +0000 UTC m=+100.832866041" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.215678 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.215737 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.215750 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.215784 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.215875 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.286307 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-xh2nz" podStartSLOduration=81.286288759 podStartE2EDuration="1m21.286288759s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.285659801 +0000 UTC m=+100.973105043" watchObservedRunningTime="2025-10-13 06:48:13.286288759 +0000 UTC m=+100.973733951" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.286550 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=80.286545046 podStartE2EDuration="1m20.286545046s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.262200224 +0000 UTC m=+100.949645416" watchObservedRunningTime="2025-10-13 06:48:13.286545046 +0000 UTC m=+100.973990238" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.302355 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-8fhpj" podStartSLOduration=80.302334628 podStartE2EDuration="1m20.302334628s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.302171114 +0000 UTC m=+100.989616306" watchObservedRunningTime="2025-10-13 06:48:13.302334628 +0000 UTC m=+100.989779830" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.317864 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r8wrs" podStartSLOduration=80.317847993 podStartE2EDuration="1m20.317847993s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.31631522 +0000 UTC m=+101.003760422" watchObservedRunningTime="2025-10-13 06:48:13.317847993 +0000 UTC m=+101.005293175" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.318517 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.318558 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.318570 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.318590 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.318601 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.335414 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-bg4kt" podStartSLOduration=81.335395286 podStartE2EDuration="1m21.335395286s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.334911683 +0000 UTC m=+101.022356895" watchObservedRunningTime="2025-10-13 06:48:13.335395286 +0000 UTC m=+101.022840478" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.365507 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podStartSLOduration=81.365489039 podStartE2EDuration="1m21.365489039s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.364934514 +0000 UTC m=+101.052379716" watchObservedRunningTime="2025-10-13 06:48:13.365489039 +0000 UTC m=+101.052934231" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.407607 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-96lj2" podStartSLOduration=81.40756768 podStartE2EDuration="1m21.40756768s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.392080645 +0000 UTC m=+101.079525847" watchObservedRunningTime="2025-10-13 06:48:13.40756768 +0000 UTC m=+101.095012872" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.421522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.421565 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.421574 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.421587 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.421597 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.433315 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=30.433299431000002 podStartE2EDuration="30.433299431s" podCreationTimestamp="2025-10-13 06:47:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:13.43255193 +0000 UTC m=+101.119997132" watchObservedRunningTime="2025-10-13 06:48:13.433299431 +0000 UTC m=+101.120744633" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.523824 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.523862 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.523870 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.523900 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.523909 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.625489 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.625522 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.625530 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.625569 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.625578 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.728210 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.728509 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.728586 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.728667 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.728731 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.831893 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.832421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.832523 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.832589 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.832651 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.934707 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.934740 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.934748 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.934762 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:13 crc kubenswrapper[4664]: I1013 06:48:13.934770 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:13Z","lastTransitionTime":"2025-10-13T06:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.037465 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.037519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.037535 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.037557 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.037576 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.045879 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.045943 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.045921 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:14 crc kubenswrapper[4664]: E1013 06:48:14.046024 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.045943 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:14 crc kubenswrapper[4664]: E1013 06:48:14.046087 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:14 crc kubenswrapper[4664]: E1013 06:48:14.046207 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:14 crc kubenswrapper[4664]: E1013 06:48:14.046324 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.139911 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.139946 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.139955 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.139987 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.139997 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.242397 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.242461 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.242479 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.242503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.242520 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.344943 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.344992 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.345004 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.345022 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.345035 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.448275 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.448356 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.448368 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.448381 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.448389 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.550734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.550778 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.550826 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.550848 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.550865 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.653519 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.653579 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.653595 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.653623 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.653641 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.755497 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.755544 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.755560 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.755584 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.755602 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.858724 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.858785 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.858816 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.858835 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.858849 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.961529 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.961596 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.961611 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.961626 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:14 crc kubenswrapper[4664]: I1013 06:48:14.961637 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:14Z","lastTransitionTime":"2025-10-13T06:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.064218 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.064263 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.064297 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.064311 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.064339 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.167056 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.167106 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.167121 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.167145 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.167161 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.269211 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.269251 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.269263 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.269282 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.269294 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.372086 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.372161 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.372191 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.372221 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.372238 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.474189 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.474244 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.474260 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.474280 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.474295 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.576143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.576187 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.576198 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.576213 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.576224 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.678208 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.678538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.678613 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.678715 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.678781 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.780602 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.780640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.780653 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.780667 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.780679 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.883027 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.883084 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.883101 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.883122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.883144 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.986316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.986737 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.986918 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.987088 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:15 crc kubenswrapper[4664]: I1013 06:48:15.987228 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:15Z","lastTransitionTime":"2025-10-13T06:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.046245 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.046262 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.046278 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.046406 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:16 crc kubenswrapper[4664]: E1013 06:48:16.046643 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:16 crc kubenswrapper[4664]: E1013 06:48:16.046757 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:16 crc kubenswrapper[4664]: E1013 06:48:16.046941 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:16 crc kubenswrapper[4664]: E1013 06:48:16.047400 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.091217 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.091292 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.091316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.091345 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.091367 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.194774 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.194881 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.194908 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.194953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.194976 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.297342 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.297482 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.297503 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.297529 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.297552 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.399864 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.399931 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.399953 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.399984 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.400007 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.503186 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.503245 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.503263 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.503287 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.503306 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.606421 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.606884 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.607044 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.607180 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.607343 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.710888 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.710958 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.710980 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.711006 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.711023 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.814739 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.815218 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.815419 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.815631 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.815777 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.919122 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.919188 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.919209 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.919236 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:16 crc kubenswrapper[4664]: I1013 06:48:16.919257 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:16Z","lastTransitionTime":"2025-10-13T06:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.023177 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.023233 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.023250 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.023272 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.023289 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.126242 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.126635 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.126787 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.126954 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.127136 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.229781 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.230194 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.230300 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.230405 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.230497 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.333821 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.334181 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.334272 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.334366 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.334452 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.437623 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.437682 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.437698 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.437721 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.437738 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.540634 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.541241 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.541486 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.541855 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.542109 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.645334 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.645790 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.646049 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.646235 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.646374 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.749372 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.749419 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.749431 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.749451 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.749464 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.852859 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.853276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.853418 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.853555 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.853758 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.957217 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.957293 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.957314 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.957344 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:17 crc kubenswrapper[4664]: I1013 06:48:17.957367 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:17Z","lastTransitionTime":"2025-10-13T06:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.046595 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.046602 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:18 crc kubenswrapper[4664]: E1013 06:48:18.046908 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.046618 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:18 crc kubenswrapper[4664]: E1013 06:48:18.047001 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.046625 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:18 crc kubenswrapper[4664]: E1013 06:48:18.047110 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:18 crc kubenswrapper[4664]: E1013 06:48:18.047228 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.060426 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.060584 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.060670 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.060748 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.060854 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.163723 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.163768 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.163784 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.163830 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.163847 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.267316 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.267681 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.267878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.268031 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.268209 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.371447 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.371789 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.371933 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.372026 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.372104 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.474919 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.475341 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.475562 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.476013 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.476231 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.580075 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.580363 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.580495 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.580645 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.580861 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.684019 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.684085 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.684105 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.684129 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.684148 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.793564 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.793619 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.793640 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.793665 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.793684 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.896155 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.896253 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.896274 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.896298 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.896316 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.999614 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.999691 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:18 crc kubenswrapper[4664]: I1013 06:48:18.999711 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:18.999734 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:18.999751 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:18Z","lastTransitionTime":"2025-10-13T06:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.103355 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.103426 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.103444 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.103475 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.103503 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.206830 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.206941 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.206970 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.206995 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.207017 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.310975 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.311039 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.311066 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.311098 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.311122 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.413786 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.414276 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.414411 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.414603 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.414740 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.518200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.518269 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.518288 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.518313 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.518329 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.621455 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.621515 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.621538 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.621568 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.621592 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.723755 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.723850 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.723878 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.723905 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.723924 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.827417 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.827511 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.827532 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.827560 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.827581 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.929946 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.930005 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.930027 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.930057 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:19 crc kubenswrapper[4664]: I1013 06:48:19.930078 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:19Z","lastTransitionTime":"2025-10-13T06:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.033264 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.033311 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.033328 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.033351 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.033368 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:20Z","lastTransitionTime":"2025-10-13T06:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
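
The condition={...} payload in the setters.go record above is plain JSON with the standard node-condition fields. As a rough illustration of its shape, the Go sketch below rebuilds the same payload; the field names and values are taken from the logged JSON itself, while the type name and program scaffolding are hypothetical, not kubelet source.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // NodeCondition mirrors the fields visible in the logged condition JSON.
    type NodeCondition struct {
    	Type               string `json:"type"`
    	Status             string `json:"status"`
    	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
    	LastTransitionTime string `json:"lastTransitionTime"`
    	Reason             string `json:"reason"`
    	Message            string `json:"message"`
    }

    func main() {
    	now := time.Now().UTC().Format(time.RFC3339)
    	c := NodeCondition{
    		Type:               "Ready",
    		Status:             "False",
    		LastHeartbeatTime:  now,
    		LastTransitionTime: now,
    		Reason:             "KubeletNotReady",
    		Message:            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
    	}
    	b, _ := json.Marshal(c)
    	fmt.Println(string(b)) // same shape as the condition={...} payload above
    }
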
Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.046651 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.046686 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.046724 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:48:20 crc kubenswrapper[4664]: I1013 06:48:20.046687 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt"
Oct 13 06:48:20 crc kubenswrapper[4664]: E1013 06:48:20.046926 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 13 06:48:20 crc kubenswrapper[4664]: E1013 06:48:20.047021 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 13 06:48:20 crc kubenswrapper[4664]: E1013 06:48:20.047118 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 13 06:48:20 crc kubenswrapper[4664]: E1013 06:48:20.047241 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.047136 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0"
Oct 13 06:48:21 crc kubenswrapper[4664]: E1013 06:48:21.047725 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mjr5r_openshift-ovn-kubernetes(74eb7029-982d-4294-bed0-63ffe7281479)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479"
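
The "back-off 40s" figure above reflects the kubelet's per-container restart back-off, which by long-standing default starts at 10s and doubles on each consecutive crash up to a 5m cap, so 40s corresponds to roughly the third failure in a row. A sketch of that schedule, assuming those defaults:

    package main

    import (
    	"fmt"
    	"time"
    )

    // crashLoopBackOff returns the kubelet-style restart delay for the n-th
    // consecutive failure (n starting at 0): base*2^n, capped at max.
    // Values here assume the long-standing defaults (10s base, 5m cap).
    func crashLoopBackOff(n int, base, max time.Duration) time.Duration {
    	d := base
    	for i := 0; i < n; i++ {
    		d *= 2
    		if d >= max {
    			return max
    		}
    	}
    	return d
    }

    func main() {
    	for n := 0; n < 6; n++ {
    		fmt.Println(n, crashLoopBackOff(n, 10*time.Second, 5*time.Minute))
    	}
    	// 0 10s, 1 20s, 2 40s (the ovnkube-controller message above), ...
    }
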
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.486143 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.486192 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.486200 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.486215 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.486225 4664 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-13T06:48:21Z","lastTransitionTime":"2025-10-13T06:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.547123 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"]
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.547477 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.549468 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.549638 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.550623 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.551236 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.646580 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3a3307c-f23d-48f6-93c7-5631d67bf353-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.646656 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3a3307c-f23d-48f6-93c7-5631d67bf353-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.646692 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.646719 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.646776 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a3a3307c-f23d-48f6-93c7-5631d67bf353-service-ca\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748269 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3a3307c-f23d-48f6-93c7-5631d67bf353-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748323 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3a3307c-f23d-48f6-93c7-5631d67bf353-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748349 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748381 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748419 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a3a3307c-f23d-48f6-93c7-5631d67bf353-service-ca\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748476 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.748488 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a3a3307c-f23d-48f6-93c7-5631d67bf353-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.749347 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a3a3307c-f23d-48f6-93c7-5631d67bf353-service-ca\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.756122 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3a3307c-f23d-48f6-93c7-5631d67bf353-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.764742 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3a3307c-f23d-48f6-93c7-5631d67bf353-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-n9v5q\" (UID: \"a3a3307c-f23d-48f6-93c7-5631d67bf353\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
Oct 13 06:48:21 crc kubenswrapper[4664]: I1013 06:48:21.873711 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q"
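
The reconciler_common.go and operation_generator.go records above trace the volume manager's two phases for the cluster-version-operator pod: VerifyControllerAttachedVolume for each declared volume, then MountVolume.SetUp for each, with the sandbox created only once all volumes are up. The Go sketch below mirrors that ordering; the volume names come from the log, but the function bodies are stand-ins, not the kubelet's actual volumemanager API.

    package main

    import "fmt"

    // Volume names taken from the cluster-version-operator pod above.
    var volumes = []string{"kube-api-access", "serving-cert", "etc-cvo-updatepayloads", "etc-ssl-certs", "service-ca"}

    func verifyControllerAttached(v string) error { return nil } // phase 1: attach check
    func mountVolumeSetUp(v string) error         { return nil } // phase 2: mount/SetUp

    func main() {
    	// Phase 1 must succeed for every volume before mounting begins...
    	for _, v := range volumes {
    		if err := verifyControllerAttached(v); err != nil {
    			fmt.Println("VerifyControllerAttachedVolume failed:", v, err)
    			return
    		}
    	}
    	// ...then each volume is mounted; only after all SetUp calls succeed
    	// does the kubelet create the sandbox and start containers.
    	for _, v := range volumes {
    		if err := mountVolumeSetUp(v); err != nil {
    			fmt.Println("MountVolume.SetUp failed:", v, err)
    			return
    		}
    		fmt.Println("MountVolume.SetUp succeeded for volume", v)
    	}
    }
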
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:22 crc kubenswrapper[4664]: I1013 06:48:22.739052 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q" event={"ID":"a3a3307c-f23d-48f6-93c7-5631d67bf353","Type":"ContainerStarted","Data":"4297624c7dac3d8dd651ba0598fae81aee76c1a984823e8224dc601a0bd227b6"} Oct 13 06:48:22 crc kubenswrapper[4664]: I1013 06:48:22.739140 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q" event={"ID":"a3a3307c-f23d-48f6-93c7-5631d67bf353","Type":"ContainerStarted","Data":"a208626700b21c9abff907c4f96b202acc36dc58a63d11ea3a5e8e53ba0a3546"} Oct 13 06:48:22 crc kubenswrapper[4664]: I1013 06:48:22.766388 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-n9v5q" podStartSLOduration=90.766361634 podStartE2EDuration="1m30.766361634s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:22.766104316 +0000 UTC m=+110.453549598" watchObservedRunningTime="2025-10-13 06:48:22.766361634 +0000 UTC m=+110.453806866" Oct 13 06:48:24 crc kubenswrapper[4664]: I1013 06:48:24.046904 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:24 crc kubenswrapper[4664]: I1013 06:48:24.046904 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:24 crc kubenswrapper[4664]: E1013 06:48:24.047056 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:24 crc kubenswrapper[4664]: I1013 06:48:24.046917 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:24 crc kubenswrapper[4664]: E1013 06:48:24.047274 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:24 crc kubenswrapper[4664]: I1013 06:48:24.047095 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:24 crc kubenswrapper[4664]: E1013 06:48:24.047460 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:24 crc kubenswrapper[4664]: E1013 06:48:24.047536 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:26 crc kubenswrapper[4664]: I1013 06:48:26.045977 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:26 crc kubenswrapper[4664]: I1013 06:48:26.046124 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:26 crc kubenswrapper[4664]: I1013 06:48:26.046165 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:26 crc kubenswrapper[4664]: I1013 06:48:26.046250 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:26 crc kubenswrapper[4664]: E1013 06:48:26.046350 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:26 crc kubenswrapper[4664]: E1013 06:48:26.046524 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:26 crc kubenswrapper[4664]: E1013 06:48:26.046681 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:26 crc kubenswrapper[4664]: E1013 06:48:26.046777 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.046383 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:28 crc kubenswrapper[4664]: E1013 06:48:28.046591 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.046944 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:28 crc kubenswrapper[4664]: E1013 06:48:28.047053 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.047246 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:28 crc kubenswrapper[4664]: E1013 06:48:28.047335 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.050931 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:28 crc kubenswrapper[4664]: E1013 06:48:28.051155 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.760320 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/1.log" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.760920 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/0.log" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.760971 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" containerID="2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88" exitCode=1 Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.761004 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerDied","Data":"2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88"} Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.761043 4664 scope.go:117] "RemoveContainer" containerID="cfe1798188d1a1d242f972912c2cba875266e67b2549c4b36a4322df2f6d2a37" Oct 13 06:48:28 crc kubenswrapper[4664]: I1013 06:48:28.761460 4664 scope.go:117] "RemoveContainer" containerID="2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88" Oct 13 06:48:28 crc kubenswrapper[4664]: E1013 06:48:28.761620 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-bg4kt_openshift-multus(2f22066f-5783-48bc-85f8-0fbb2eed7e0b)\"" pod="openshift-multus/multus-bg4kt" podUID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" Oct 13 06:48:29 crc kubenswrapper[4664]: I1013 06:48:29.766470 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/1.log" Oct 13 06:48:30 crc kubenswrapper[4664]: I1013 06:48:30.046947 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:30 crc kubenswrapper[4664]: I1013 06:48:30.046996 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:30 crc kubenswrapper[4664]: I1013 06:48:30.047041 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:30 crc kubenswrapper[4664]: I1013 06:48:30.047002 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:30 crc kubenswrapper[4664]: E1013 06:48:30.047095 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:30 crc kubenswrapper[4664]: E1013 06:48:30.047300 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:30 crc kubenswrapper[4664]: E1013 06:48:30.047342 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:30 crc kubenswrapper[4664]: E1013 06:48:30.047449 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:32 crc kubenswrapper[4664]: I1013 06:48:32.046816 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:32 crc kubenswrapper[4664]: E1013 06:48:32.046975 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:32 crc kubenswrapper[4664]: I1013 06:48:32.046819 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:32 crc kubenswrapper[4664]: E1013 06:48:32.047233 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:32 crc kubenswrapper[4664]: I1013 06:48:32.047426 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:32 crc kubenswrapper[4664]: E1013 06:48:32.047491 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:32 crc kubenswrapper[4664]: I1013 06:48:32.047747 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:32 crc kubenswrapper[4664]: E1013 06:48:32.047970 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:33 crc kubenswrapper[4664]: E1013 06:48:33.046059 4664 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Oct 13 06:48:33 crc kubenswrapper[4664]: E1013 06:48:33.145595 4664 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 13 06:48:34 crc kubenswrapper[4664]: I1013 06:48:34.046614 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:34 crc kubenswrapper[4664]: E1013 06:48:34.046895 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:34 crc kubenswrapper[4664]: I1013 06:48:34.047043 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:34 crc kubenswrapper[4664]: E1013 06:48:34.047225 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:34 crc kubenswrapper[4664]: I1013 06:48:34.047319 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:34 crc kubenswrapper[4664]: E1013 06:48:34.047461 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:34 crc kubenswrapper[4664]: I1013 06:48:34.047903 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:34 crc kubenswrapper[4664]: E1013 06:48:34.048195 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.046276 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.046278 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.046347 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:36 crc kubenswrapper[4664]: E1013 06:48:36.047366 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.046378 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:36 crc kubenswrapper[4664]: E1013 06:48:36.047156 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.047163 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" Oct 13 06:48:36 crc kubenswrapper[4664]: E1013 06:48:36.047501 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:36 crc kubenswrapper[4664]: E1013 06:48:36.047638 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.801633 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/3.log" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.803825 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerStarted","Data":"9a5f3e1290eef021a5e3fc50abae0cb852921600c76ca982f2e44128e9c91671"} Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.837548 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podStartSLOduration=104.837529548 podStartE2EDuration="1m44.837529548s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:36.835209303 +0000 UTC m=+124.522654525" watchObservedRunningTime="2025-10-13 06:48:36.837529548 +0000 UTC m=+124.524974760" Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.950172 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9mgbt"] Oct 13 06:48:36 crc kubenswrapper[4664]: I1013 06:48:36.950623 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:36 crc kubenswrapper[4664]: E1013 06:48:36.950725 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:37 crc kubenswrapper[4664]: I1013 06:48:37.137404 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:48:38 crc kubenswrapper[4664]: I1013 06:48:38.046315 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:38 crc kubenswrapper[4664]: I1013 06:48:38.046345 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:38 crc kubenswrapper[4664]: I1013 06:48:38.046314 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:38 crc kubenswrapper[4664]: E1013 06:48:38.046445 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:38 crc kubenswrapper[4664]: E1013 06:48:38.046501 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:38 crc kubenswrapper[4664]: E1013 06:48:38.046552 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:38 crc kubenswrapper[4664]: E1013 06:48:38.146939 4664 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 13 06:48:39 crc kubenswrapper[4664]: I1013 06:48:39.046084 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:39 crc kubenswrapper[4664]: E1013 06:48:39.046494 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:40 crc kubenswrapper[4664]: I1013 06:48:40.046616 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:40 crc kubenswrapper[4664]: I1013 06:48:40.046704 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:40 crc kubenswrapper[4664]: I1013 06:48:40.046729 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:40 crc kubenswrapper[4664]: E1013 06:48:40.046880 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:40 crc kubenswrapper[4664]: E1013 06:48:40.047019 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:40 crc kubenswrapper[4664]: E1013 06:48:40.047184 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:41 crc kubenswrapper[4664]: I1013 06:48:41.046070 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:41 crc kubenswrapper[4664]: E1013 06:48:41.046549 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:41 crc kubenswrapper[4664]: I1013 06:48:41.046581 4664 scope.go:117] "RemoveContainer" containerID="2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88" Oct 13 06:48:41 crc kubenswrapper[4664]: I1013 06:48:41.821350 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/1.log" Oct 13 06:48:41 crc kubenswrapper[4664]: I1013 06:48:41.821749 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerStarted","Data":"9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5"} Oct 13 06:48:42 crc kubenswrapper[4664]: I1013 06:48:42.046220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:42 crc kubenswrapper[4664]: E1013 06:48:42.046420 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 13 06:48:42 crc kubenswrapper[4664]: I1013 06:48:42.046707 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:42 crc kubenswrapper[4664]: E1013 06:48:42.046842 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 13 06:48:42 crc kubenswrapper[4664]: I1013 06:48:42.047104 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:42 crc kubenswrapper[4664]: E1013 06:48:42.047250 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 13 06:48:43 crc kubenswrapper[4664]: I1013 06:48:43.046985 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:43 crc kubenswrapper[4664]: E1013 06:48:43.050764 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9mgbt" podUID="eba49cc7-48bb-4372-8eb3-c88513c591b9" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.046252 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.046293 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.046678 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.048746 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.048977 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.049177 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Oct 13 06:48:44 crc kubenswrapper[4664]: I1013 06:48:44.049762 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Oct 13 06:48:45 crc kubenswrapper[4664]: I1013 06:48:45.046138 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:48:45 crc kubenswrapper[4664]: I1013 06:48:45.049179 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Oct 13 06:48:45 crc kubenswrapper[4664]: I1013 06:48:45.049252 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.248827 4664 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.351004 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.351546 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.353403 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.354269 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.354379 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.355093 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.355727 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s7fbf"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.361440 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.368538 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.368574 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.368581 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.368848 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.369081 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.369311 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.372008 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"] Oct 13 06:48:52 crc kubenswrapper[4664]: W1013 06:48:52.372653 4664 reflector.go:561] object-"openshift-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Oct 13 06:48:52 crc kubenswrapper[4664]: E1013 06:48:52.372708 4664 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 13 06:48:52 crc kubenswrapper[4664]: W1013 06:48:52.372781 4664 reflector.go:561] object-"openshift-controller-manager"/"openshift-global-ca": failed to list *v1.ConfigMap: configmaps "openshift-global-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Oct 13 06:48:52 crc kubenswrapper[4664]: E1013 06:48:52.372808 4664 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-global-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-global-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.372909 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: W1013 06:48:52.373110 4664 reflector.go:561] object-"openshift-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Oct 13 06:48:52 crc kubenswrapper[4664]: E1013 06:48:52.373134 4664 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.373249 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.373250 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: W1013 06:48:52.373561 4664 reflector.go:561] object-"openshift-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Oct 13 06:48:52 crc kubenswrapper[4664]: E1013 06:48:52.373602 4664 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.373732 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.373799 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: W1013 06:48:52.373928 4664 reflector.go:561] object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c": failed to list *v1.Secret: secrets "openshift-controller-manager-sa-dockercfg-msq4c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.374487 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: E1013 06:48:52.373956 4664 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-msq4c\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-sa-dockercfg-msq4c\" is forbidden: User \"system:node:crc\" cannot list resource 
\"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.387678 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.387933 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.388282 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.388466 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.388770 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-dh2vl"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389102 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389267 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389346 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389462 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389615 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389776 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389789 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.389945 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390047 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390254 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390463 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390618 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390698 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.391158 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.391001 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.391398 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.391684 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.390765 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-dh2vl" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.392301 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.392933 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.393108 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.393524 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.413552 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.413925 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.414084 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.414193 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.414219 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.414311 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.414630 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.415454 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.415636 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.416035 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.416471 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.416566 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zjsrt"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.416939 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418097 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418181 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418280 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418508 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418575 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418666 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418754 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418806 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418904 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418954 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.418516 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.419086 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.419468 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.420008 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.420022 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.422605 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vkt4"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.423177 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.423526 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.423913 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427226 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427274 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-dir\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427296 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427322 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfxqw\" (UniqueName: \"kubernetes.io/projected/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-kube-api-access-cfxqw\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427341 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/65efd88f-be68-494a-a3b8-3a1b2df263d9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427366 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427383 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vf4b\" (UniqueName: \"kubernetes.io/projected/55a00b0d-9ade-48a5-963f-fd17e913ef4b-kube-api-access-2vf4b\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427420 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427439 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-client\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427459 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-serving-cert\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427476 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5hdt\" (UniqueName: \"kubernetes.io/projected/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-kube-api-access-h5hdt\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427497 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427512 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-encryption-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427530 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427547 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/37817d7f-3ba8-4e24-844d-72a8860dd693-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427565 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427583 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fr7r\" (UniqueName: \"kubernetes.io/projected/37817d7f-3ba8-4e24-844d-72a8860dd693-kube-api-access-8fr7r\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427602 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-client\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427621 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit-dir\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427639 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psxnr\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-kube-api-access-psxnr\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427658 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427677 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427703 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427724 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-auth-proxy-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427744 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-images\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427760 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnrv7\" (UniqueName: \"kubernetes.io/projected/d981b25e-c830-4f47-9851-d51db10ed5bf-kube-api-access-lnrv7\") pod \"downloads-7954f5f757-dh2vl\" (UID: \"d981b25e-c830-4f47-9851-d51db10ed5bf\") " pod="openshift-console/downloads-7954f5f757-dh2vl" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427778 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b3022adf-c38f-450e-9f28-4581365f36e9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427805 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427823 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65efd88f-be68-494a-a3b8-3a1b2df263d9-serving-cert\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427855 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3022adf-c38f-450e-9f28-4581365f36e9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427892 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: 
\"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427913 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-policies\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427929 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-image-import-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427946 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427964 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bp4m\" (UniqueName: \"kubernetes.io/projected/af35da51-424c-475c-8306-87c51913a3f9-kube-api-access-9bp4m\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427982 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.427997 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-config\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428013 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvzql\" (UniqueName: \"kubernetes.io/projected/65efd88f-be68-494a-a3b8-3a1b2df263d9-kube-api-access-qvzql\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428031 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-serving-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " 
pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428046 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-serving-cert\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428064 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26jqd\" (UniqueName: \"kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428087 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/af35da51-424c-475c-8306-87c51913a3f9-machine-approver-tls\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428102 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-encryption-config\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428118 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.428133 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-node-pullsecrets\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.431355 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.432923 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.433185 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.433595 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 
06:48:52.433841 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.433885 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.434137 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.434299 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.434436 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.434511 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.435113 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.435201 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.435629 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.436709 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.436800 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gkm66"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.437021 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.437108 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.437152 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.437401 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.437644 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.453187 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.453637 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.454295 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.454977 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.455126 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.455227 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.455777 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.455978 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.456094 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.456178 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.472554 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.479657 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6bz9t"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.481343 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.481854 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.482321 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.482560 4664 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-router-certs" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.482704 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.482847 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.483339 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.483500 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.483744 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.484215 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.484523 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.484839 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.486351 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.487517 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.487812 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.487884 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.490127 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.516194 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.517050 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.517150 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.517317 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.520882 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.521563 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-4bxj9"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.521576 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.522092 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.522520 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.522843 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.526158 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.528657 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.526948 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529094 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.527773 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529390 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529484 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529548 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b3022adf-c38f-450e-9f28-4581365f36e9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529573 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-auth-proxy-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529593 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-images\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529615 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnrv7\" (UniqueName: \"kubernetes.io/projected/d981b25e-c830-4f47-9851-d51db10ed5bf-kube-api-access-lnrv7\") pod \"downloads-7954f5f757-dh2vl\" (UID: \"d981b25e-c830-4f47-9851-d51db10ed5bf\") " pod="openshift-console/downloads-7954f5f757-dh2vl" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529642 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-trusted-ca\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529667 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529669 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529685 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") pod 
\"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529739 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65efd88f-be68-494a-a3b8-3a1b2df263d9-serving-cert\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529759 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3022adf-c38f-450e-9f28-4581365f36e9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529776 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529806 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-policies\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529848 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-image-import-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529864 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-config\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529903 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529920 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bp4m\" (UniqueName: \"kubernetes.io/projected/af35da51-424c-475c-8306-87c51913a3f9-kube-api-access-9bp4m\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 
06:48:52.529935 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529954 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvzql\" (UniqueName: \"kubernetes.io/projected/65efd88f-be68-494a-a3b8-3a1b2df263d9-kube-api-access-qvzql\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529973 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-serving-cert\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.529994 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-serving-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530011 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-serving-cert\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530032 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48g5b\" (UniqueName: \"kubernetes.io/projected/3f93db1a-5cc4-491b-ac61-3e679b9e9686-kube-api-access-48g5b\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530053 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26jqd\" (UniqueName: \"kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530069 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-encryption-config\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530098 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/af35da51-424c-475c-8306-87c51913a3f9-machine-approver-tls\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530118 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530141 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-node-pullsecrets\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530170 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-dir\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530191 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530209 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530228 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530246 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530268 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: 
\"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530288 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfxqw\" (UniqueName: \"kubernetes.io/projected/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-kube-api-access-cfxqw\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530309 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/65efd88f-be68-494a-a3b8-3a1b2df263d9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530330 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-client\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530355 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530371 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-service-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530407 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vf4b\" (UniqueName: \"kubernetes.io/projected/55a00b0d-9ade-48a5-963f-fd17e913ef4b-kube-api-access-2vf4b\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530422 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-config\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530441 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" Oct 13 06:48:52 crc 
kubenswrapper[4664]: I1013 06:48:52.530459 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530477 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5hdt\" (UniqueName: \"kubernetes.io/projected/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-kube-api-access-h5hdt\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530493 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-client\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530508 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-serving-cert\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530523 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39908d1a-79af-485a-8deb-43f03552b3d1-serving-cert\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530541 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530559 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-encryption-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530555 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-images\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530577 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlbsp\" (UniqueName: \"kubernetes.io/projected/39908d1a-79af-485a-8deb-43f03552b3d1-kube-api-access-wlbsp\") pod \"console-operator-58897d9998-6bz9t\" (UID: 
\"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.530637 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.531312 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.531360 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.531419 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-config\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.532285 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.532402 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/37817d7f-3ba8-4e24-844d-72a8860dd693-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.532437 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.532467 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-config\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.533359 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.536115 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-policies\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.537204 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.537300 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-serving-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.540413 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.540917 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.541229 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.541643 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.541944 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.542139 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.542280 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.542402 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.543705 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.544365 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.544647 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vpv87"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.546987 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65efd88f-be68-494a-a3b8-3a1b2df263d9-serving-cert\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.547564 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.547976 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.548300 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.548550 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.548661 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.555037 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-serving-cert\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.555128 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/55a00b0d-9ade-48a5-963f-fd17e913ef4b-audit-dir\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.563885 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-node-pullsecrets\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.563969 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.564063 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-image-import-ca\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.564582 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af35da51-424c-475c-8306-87c51913a3f9-auth-proxy-config\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.564631 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b3022adf-c38f-450e-9f28-4581365f36e9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.565255 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-trusted-ca-bundle\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.565396 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.565907 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/65efd88f-be68-494a-a3b8-3a1b2df263d9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.566041 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.575340 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.576235 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.577759 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.579982 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.584377 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/37817d7f-3ba8-4e24-844d-72a8860dd693-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.584749 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/af35da51-424c-475c-8306-87c51913a3f9-machine-approver-tls\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.584966 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-encryption-config\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.585260 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.586652 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.586972 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t4v67"] Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 
06:48:52.592767    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fr7r\" (UniqueName: \"kubernetes.io/projected/37817d7f-3ba8-4e24-844d-72a8860dd693-kube-api-access-8fr7r\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593228    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593324    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-client\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593378    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit-dir\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593475    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psxnr\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-kube-api-access-psxnr\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593522    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593640    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-audit-dir\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.593862    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f67l8"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.594443    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.594439    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.594634    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.595013    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.599642    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b3022adf-c38f-450e-9f28-4581365f36e9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.606248    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.606377    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.606801    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-etcd-client\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.607264    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-serving-cert\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.607283    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.608163    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.609249    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.609800    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.610200    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-encryption-config\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.610468    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.612292    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.612815    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/55a00b0d-9ade-48a5-963f-fd17e913ef4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.615306    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/55a00b0d-9ade-48a5-963f-fd17e913ef4b-etcd-client\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.616084    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-z4pxt"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.616198    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.637932    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.638186    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vkt4"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.638255    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dh2vl"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.638335    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.638471    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-z4pxt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.641313    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.649720    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.654883    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.656769    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.663436    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.667468    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gkm66"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.676850    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s7fbf"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.678433    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.680244    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.682909    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.689752    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.693305    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.693948    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48g5b\" (UniqueName: \"kubernetes.io/projected/3f93db1a-5cc4-491b-ac61-3e679b9e9686-kube-api-access-48g5b\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694090    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694172    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694253    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-client\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694329    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694407    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-service-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694484    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-config\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694556    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694652    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39908d1a-79af-485a-8deb-43f03552b3d1-serving-cert\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694720    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlbsp\" (UniqueName: \"kubernetes.io/projected/39908d1a-79af-485a-8deb-43f03552b3d1-kube-api-access-wlbsp\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694855    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-config\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.694958    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-trusted-ca\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.695051    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-serving-cert\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.696553    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-config\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.696569    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-service-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.696801    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.697191    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-ca\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.706315    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.706398    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-lfw8h"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.706433    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-etcd-client\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.706616    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f93db1a-5cc4-491b-ac61-3e679b9e9686-serving-cert\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.707160    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.707959    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lfw8h"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.710021    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-v4pkt"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.711197    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.713133    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.715586    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.724900    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zjsrt"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.724962    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.732231    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.735079    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.735365    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6bz9t"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.736617    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.738256    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.739139    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.740763    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.743563    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.745140    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.747479    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.747576    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.755686    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.755727    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.756747    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.759953    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t4v67"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.760050    4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-2chwr"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.761001    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f67l8"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.761131    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2chwr"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.761512    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.761812    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.763667    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.765103    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lfw8h"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.769390    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39908d1a-79af-485a-8deb-43f03552b3d1-serving-cert\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.775075    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.776076    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vpv87"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.787468    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.790725    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.791326    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-v4pkt"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.799693    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-trusted-ca\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.800226    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.801982    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2chwr"]
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.814198    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.834394    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.837027    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39908d1a-79af-485a-8deb-43f03552b3d1-config\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.853569    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.874031    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.913695    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.935318    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.954307    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Oct 13 06:48:52 crc kubenswrapper[4664]: I1013 06:48:52.974090    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.001018    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.014873    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.034911    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.055292    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.073999    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.095173    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.115426    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.136974    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.154117    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.175382    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.194694    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.213882    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.234168    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.254324    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.273664    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.294130    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.314990    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.357672    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bp4m\" (UniqueName: \"kubernetes.io/projected/af35da51-424c-475c-8306-87c51913a3f9-kube-api-access-9bp4m\") pod \"machine-approver-56656f9798-dn8d4\" (UID: \"af35da51-424c-475c-8306-87c51913a3f9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.372438    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnrv7\" (UniqueName: \"kubernetes.io/projected/d981b25e-c830-4f47-9851-d51db10ed5bf-kube-api-access-lnrv7\") pod \"downloads-7954f5f757-dh2vl\" (UID: \"d981b25e-c830-4f47-9851-d51db10ed5bf\") " pod="openshift-console/downloads-7954f5f757-dh2vl"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.391891    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-dh2vl"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.416005    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.417115    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvzql\" (UniqueName: \"kubernetes.io/projected/65efd88f-be68-494a-a3b8-3a1b2df263d9-kube-api-access-qvzql\") pod \"openshift-config-operator-7777fb866f-gcdzv\" (UID: \"65efd88f-be68-494a-a3b8-3a1b2df263d9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.434613    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.455376    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.473694    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.494385    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.561025    4664 request.go:700] Waited for 1.018328874s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/secrets?fieldSelector=metadata.name%3Dkube-apiserver-operator-serving-cert&limit=500&resourceVersion=0
Oct 13 06:48:53 crc kubenswrapper[4664]: E1013 06:48:53.561280    4664 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition
Oct 13 06:48:53 crc kubenswrapper[4664]: E1013 06:48:53.563049    4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert podName:79847ce1-e701-447b-b9d1-a0609b0b09ab nodeName:}" failed. No retries permitted until 2025-10-13 06:48:54.062955926 +0000 UTC m=+141.750401158 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert") pod "controller-manager-879f6c89f-f66qq" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab") : failed to sync secret cache: timed out waiting for the condition
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.564345    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.564552    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.565159    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Oct 13 06:48:53 crc kubenswrapper[4664]: E1013 06:48:53.572906    4664 configmap.go:193] Couldn't get configMap openshift-controller-manager/openshift-global-ca: failed to sync configmap cache: timed out waiting for the condition
Oct 13 06:48:53 crc kubenswrapper[4664]: E1013 06:48:53.573174    4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles podName:79847ce1-e701-447b-b9d1-a0609b0b09ab nodeName:}" failed. No retries permitted until 2025-10-13 06:48:54.07309252 +0000 UTC m=+141.760537742 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles") pod "controller-manager-879f6c89f-f66qq" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab") : failed to sync configmap cache: timed out waiting for the condition
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.574145    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.594542    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.613486    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.615878    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.634052    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.638296    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.655215    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.675633    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.696932    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.707149    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dh2vl"]
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.715388    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: W1013 06:48:53.718975    4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd981b25e_c830_4f47_9851_d51db10ed5bf.slice/crio-449bdeb3d5b7abf5a003441724888cd4c1e43a0bfa72cc3ebb7372f5dc073ae5 WatchSource:0}: Error finding container 449bdeb3d5b7abf5a003441724888cd4c1e43a0bfa72cc3ebb7372f5dc073ae5: Status 404 returned error can't find the container with id 449bdeb3d5b7abf5a003441724888cd4c1e43a0bfa72cc3ebb7372f5dc073ae5
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.752164    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.771592    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26jqd\" (UniqueName: \"kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd\") pod \"route-controller-manager-6576b87f9c-6tklh\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.775200    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.794543    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.808857    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"]
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.814016    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Oct 13 06:48:53 crc kubenswrapper[4664]: W1013 06:48:53.819738    4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65efd88f_be68_494a_a3b8_3a1b2df263d9.slice/crio-41bc4e9bcf198810e8a1ae3d3c5ddaa3ef40becae7f9138633e6e206ea502a81 WatchSource:0}: Error finding container 41bc4e9bcf198810e8a1ae3d3c5ddaa3ef40becae7f9138633e6e206ea502a81: Status 404 returned error can't find the container with id 41bc4e9bcf198810e8a1ae3d3c5ddaa3ef40becae7f9138633e6e206ea502a81
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.834463    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.867669    4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerStarted","Data":"41bc4e9bcf198810e8a1ae3d3c5ddaa3ef40becae7f9138633e6e206ea502a81"}
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.869303    4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" event={"ID":"af35da51-424c-475c-8306-87c51913a3f9","Type":"ContainerStarted","Data":"b895aaf42418484ca38ef94fa802d2c0b601a8b8cc5a7314dd7aadd13c7e7046"}
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.873223    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfxqw\" (UniqueName: \"kubernetes.io/projected/3881c61c-1e8b-437c-85b1-6bade9f8f4f9-kube-api-access-cfxqw\") pod \"machine-api-operator-5694c8668f-nhbkw\" (UID: \"3881c61c-1e8b-437c-85b1-6bade9f8f4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.873898    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.875964    4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dh2vl" event={"ID":"d981b25e-c830-4f47-9851-d51db10ed5bf","Type":"ContainerStarted","Data":"f953a7a2b6c6c64ba3135752f4c0a4e12d7ccb0fa10df5fc51d26cd405d9e170"}
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.876120    4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dh2vl" event={"ID":"d981b25e-c830-4f47-9851-d51db10ed5bf","Type":"ContainerStarted","Data":"449bdeb3d5b7abf5a003441724888cd4c1e43a0bfa72cc3ebb7372f5dc073ae5"}
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.877168    4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-dh2vl"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.878481    4664 patch_prober.go:28] interesting pod/downloads-7954f5f757-dh2vl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body=
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.878592    4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dh2vl" podUID="d981b25e-c830-4f47-9851-d51db10ed5bf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.894054    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.914153    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.934018    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.953751    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.955276    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.983020    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"
Oct 13 06:48:53 crc kubenswrapper[4664]: I1013 06:48:53.992158    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vf4b\" (UniqueName: \"kubernetes.io/projected/55a00b0d-9ade-48a5-963f-fd17e913ef4b-kube-api-access-2vf4b\") pod \"apiserver-7bbb656c7d-97b4k\" (UID: \"55a00b0d-9ade-48a5-963f-fd17e913ef4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.009510    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5hdt\" (UniqueName: \"kubernetes.io/projected/fa3d6ad9-c819-4a98-9ded-9371e6259f9b-kube-api-access-h5hdt\") pod \"apiserver-76f77b778f-s7fbf\" (UID: \"fa3d6ad9-c819-4a98-9ded-9371e6259f9b\") " pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.030692    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fr7r\" (UniqueName: \"kubernetes.io/projected/37817d7f-3ba8-4e24-844d-72a8860dd693-kube-api-access-8fr7r\") pod \"cluster-samples-operator-665b6dd947-468sf\" (UID: \"37817d7f-3ba8-4e24-844d-72a8860dd693\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.034493    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.055064    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.074463    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.078373    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.094274    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.114593    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.117440    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.117472    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.134429    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.162380    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.174416    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.184901    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"]
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.191171    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.195222    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.214253    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.227423    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.232262    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nhbkw"]
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.233184    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.255638    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.278566    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.294277    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.314058    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.354935    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psxnr\" (UniqueName: \"kubernetes.io/projected/b3022adf-c38f-450e-9f28-4581365f36e9-kube-api-access-psxnr\") pod \"cluster-image-registry-operator-dc59b4c8b-6w5q7\" (UID: \"b3022adf-c38f-450e-9f28-4581365f36e9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.355650    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.374767    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.399642    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.412231    4664 projected.go:288] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.418115    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.439468    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.449042    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf"]
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.477997    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48g5b\" (UniqueName: \"kubernetes.io/projected/3f93db1a-5cc4-491b-ac61-3e679b9e9686-kube-api-access-48g5b\") pod \"etcd-operator-b45778765-2vkt4\" (UID: \"3f93db1a-5cc4-491b-ac61-3e679b9e9686\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.490852    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.498381    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf40d27a-b0f2-4169-ad05-5ff9e4d46595-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nnfdv\" (UID: \"cf40d27a-b0f2-4169-ad05-5ff9e4d46595\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.521685    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.533251    4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlbsp\" (UniqueName: \"kubernetes.io/projected/39908d1a-79af-485a-8deb-43f03552b3d1-kube-api-access-wlbsp\") pod \"console-operator-58897d9998-6bz9t\" (UID: \"39908d1a-79af-485a-8deb-43f03552b3d1\") " pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.535400    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.554660    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.572710    4664 request.go:700] Waited for 1.861198336s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/hostpath-provisioner/secrets?fieldSelector=metadata.name%3Dcsi-hostpath-provisioner-sa-dockercfg-qd74k&limit=500&resourceVersion=0
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.574454    4664 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.582819    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.583933    4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.598059    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.603374    4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-s7fbf"]
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.615554    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.634707    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.656328    4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.674089    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.695006    4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.725733    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fgxc\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.725778    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.725843    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-config\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.725877    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.725898    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-serving-cert\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726176    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/821e4208-46ce-483c-a06f-83d5e8d74cf0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726273    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726318    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726348    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726401    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-metrics-tls\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726425    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726442    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726522    4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726563    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726614    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726643    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726676    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rdch\" (UniqueName: \"kubernetes.io/projected/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-kube-api-access-7rdch\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726739    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726764    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726788    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws7fp\" (UniqueName: \"kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726836    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-service-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726856    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726912    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726930    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.726954    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nq77\" (UniqueName: \"kubernetes.io/projected/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-kube-api-access-4nq77\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727003    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727034    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727065    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727082    4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID:
\"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727098 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727116 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727158 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7kmv\" (UniqueName: \"kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727173 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727192 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727206 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727233 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727249 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: 
\"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727267 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/821e4208-46ce-483c-a06f-83d5e8d74cf0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727296 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727315 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727344 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lprqh\" (UniqueName: \"kubernetes.io/projected/821e4208-46ce-483c-a06f-83d5e8d74cf0-kube-api-access-lprqh\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.727362 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tc9f\" (UniqueName: \"kubernetes.io/projected/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-kube-api-access-6tc9f\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.733540 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"] Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.735629 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.235610766 +0000 UTC m=+142.923055958 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.744149 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.758306 4664 projected.go:194] Error preparing data for projected volume kube-api-access-qljbh for pod openshift-controller-manager/controller-manager-879f6c89f-f66qq: failed to sync configmap cache: timed out waiting for the condition Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.758404 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh podName:79847ce1-e701-447b-b9d1-a0609b0b09ab nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.258363964 +0000 UTC m=+142.945809156 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qljbh" (UniqueName: "kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh") pod "controller-manager-879f6c89f-f66qq" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab") : failed to sync configmap cache: timed out waiting for the condition Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.764717 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.774251 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.786446 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7"] Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.813497 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.818591 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.820568 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828371 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828510 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828537 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828556 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fgxc\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.828604 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.328567263 +0000 UTC m=+143.016012485 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828655 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828709 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kc82s\" (UniqueName: \"kubernetes.io/projected/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-kube-api-access-kc82s\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828744 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828776 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/9acbdb51-299c-4f92-9dbc-72cc388b9985-proxy-tls\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828842 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c8969bc0-07ec-44a0-98fd-03669c3557db-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828890 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-cert\") pod \"ingress-canary-2chwr\" (UID: \"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828927 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/821e4208-46ce-483c-a06f-83d5e8d74cf0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.828955 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q89hc\" (UniqueName: \"kubernetes.io/projected/207d72d8-7daf-4223-9b7a-25c4edfdb490-kube-api-access-q89hc\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829006 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829029 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829052 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-metrics-tls\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829083 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates\") pod 
\"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829106 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-node-bootstrap-token\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829193 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829217 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a8757a02-6c3c-4f30-9ede-32cc55c8b616-trusted-ca\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829239 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829257 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-csi-data-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829277 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829298 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1f021cde-f007-411e-95c6-6be5f2bc10a4-config-volume\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829319 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc 
kubenswrapper[4664]: I1013 06:48:54.829336 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stdqf\" (UniqueName: \"kubernetes.io/projected/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-kube-api-access-stdqf\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829356 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/425e3312-93c3-42cc-a840-a6cbb635e244-tmpfs\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829373 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-certs\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829389 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-mountpoint-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829409 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rdch\" (UniqueName: \"kubernetes.io/projected/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-kube-api-access-7rdch\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829434 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829452 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws7fp\" (UniqueName: \"kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829469 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829488 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-srv-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829518 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-service-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829547 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-default-certificate\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829567 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9acbdb51-299c-4f92-9dbc-72cc388b9985-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829586 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829607 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829627 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829645 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829669 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9306ec08-b511-460b-be5d-a7f698672ffe-serving-cert\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829699 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829716 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-key\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829735 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829753 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-772n7\" (UniqueName: \"kubernetes.io/projected/1f021cde-f007-411e-95c6-6be5f2bc10a4-kube-api-access-772n7\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829771 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmptb\" (UniqueName: \"kubernetes.io/projected/13260ddb-8d39-4a5c-bc14-15838dc93ff1-kube-api-access-cmptb\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829803 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829848 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829874 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tc9f\" (UniqueName: \"kubernetes.io/projected/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-kube-api-access-6tc9f\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829899 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-profile-collector-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829922 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pztxl\" (UniqueName: \"kubernetes.io/projected/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-kube-api-access-pztxl\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829950 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.829980 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lprqh\" (UniqueName: \"kubernetes.io/projected/821e4208-46ce-483c-a06f-83d5e8d74cf0-kube-api-access-lprqh\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830007 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6m9p\" (UniqueName: \"kubernetes.io/projected/89121f81-0dec-49f3-adf1-141069760008-kube-api-access-h6m9p\") pod \"migrator-59844c95c7-h5ptp\" (UID: \"89121f81-0dec-49f3-adf1-141069760008\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830031 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-registration-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830055 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-webhook-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830109 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j424z\" (UniqueName: \"kubernetes.io/projected/2dc1745c-b329-40b4-8f42-e44e563f452f-kube-api-access-j424z\") pod 
\"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830140 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-config\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830165 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-stats-auth\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830203 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-plugins-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830232 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830256 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-serving-cert\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830276 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/207d72d8-7daf-4223-9b7a-25c4edfdb490-service-ca-bundle\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830301 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9eddc646-c3eb-4276-b359-f2c372c16827-proxy-tls\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830336 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830366 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s7fv\" (UniqueName: \"kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830396 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-apiservice-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830426 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830445 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1f021cde-f007-411e-95c6-6be5f2bc10a4-metrics-tls\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830464 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830483 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-auth-proxy-config\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830516 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830543 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7ssn\" (UniqueName: \"kubernetes.io/projected/1d87b0ff-e00c-4918-b7f9-a08a425a5012-kube-api-access-q7ssn\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 
06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830568 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq7qv\" (UniqueName: \"kubernetes.io/projected/9eddc646-c3eb-4276-b359-f2c372c16827-kube-api-access-nq7qv\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830637 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8969bc0-07ec-44a0-98fd-03669c3557db-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830688 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swtcs\" (UniqueName: \"kubernetes.io/projected/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-kube-api-access-swtcs\") pod \"ingress-canary-2chwr\" (UID: \"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830722 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxdqh\" (UniqueName: \"kubernetes.io/projected/8051931c-c553-49fb-82bc-f584e6a34ff2-kube-api-access-cxdqh\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: \"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830748 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx675\" (UniqueName: \"kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830774 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b78fa09-bd0f-4128-8652-09d27bad2427-config\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830864 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830896 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8051931c-c553-49fb-82bc-f584e6a34ff2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: 
\"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830955 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qthgz\" (UniqueName: \"kubernetes.io/projected/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-kube-api-access-qthgz\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.830985 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dbhn\" (UniqueName: \"kubernetes.io/projected/9306ec08-b511-460b-be5d-a7f698672ffe-kube-api-access-7dbhn\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831010 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8969bc0-07ec-44a0-98fd-03669c3557db-config\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831045 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831080 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-socket-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831118 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ksw8\" (UniqueName: \"kubernetes.io/projected/9acbdb51-299c-4f92-9dbc-72cc388b9985-kube-api-access-4ksw8\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831751 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-metrics-certs\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831799 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831837 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr694\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-kube-api-access-pr694\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9306ec08-b511-460b-be5d-a7f698672ffe-config\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831907 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831929 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831951 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.831973 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nq77\" (UniqueName: \"kubernetes.io/projected/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-kube-api-access-4nq77\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832032 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42l5b\" (UniqueName: \"kubernetes.io/projected/55e9e787-3f8b-4a88-8693-7e0b265b4724-kube-api-access-42l5b\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832055 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/2dc1745c-b329-40b4-8f42-e44e563f452f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832073 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-cabundle\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832101 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6b78fa09-bd0f-4128-8652-09d27bad2427-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832147 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832167 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b78fa09-bd0f-4128-8652-09d27bad2427-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832183 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-srv-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832206 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a8757a02-6c3c-4f30-9ede-32cc55c8b616-metrics-tls\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832227 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832246 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832282 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7kmv\" (UniqueName: \"kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832300 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832338 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prwd9\" (UniqueName: \"kubernetes.io/projected/425e3312-93c3-42cc-a840-a6cbb635e244-kube-api-access-prwd9\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832368 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832393 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/821e4208-46ce-483c-a06f-83d5e8d74cf0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.832415 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-images\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.833971 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/821e4208-46ce-483c-a06f-83d5e8d74cf0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.834637 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies\") pod 
\"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.835830 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-service-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.838136 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.839264 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.839423 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.840446 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.840497 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.841283 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.341267339 +0000 UTC m=+143.028712531 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.841573 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.841712 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.845648 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.848164 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.848259 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.851136 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.851378 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.852492 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-config\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.853733 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.854370 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-metrics-tls\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.855170 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.859657 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.859875 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.860252 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.883663 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.884307 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 
crc kubenswrapper[4664]: I1013 06:48:54.884539 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.885456 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.885633 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.885723 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.886249 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.890745 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.898776 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.903033 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.903743 4664 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/821e4208-46ce-483c-a06f-83d5e8d74cf0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.905760 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-serving-cert\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.908701 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rdch\" (UniqueName: \"kubernetes.io/projected/4e2fa3b0-29e6-4bdd-aed7-82cc6690b549-kube-api-access-7rdch\") pod \"authentication-operator-69f744f599-gkm66\" (UID: \"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.909706 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fgxc\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.912272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" event={"ID":"fa3d6ad9-c819-4a98-9ded-9371e6259f9b","Type":"ContainerStarted","Data":"800f719627e5c5e203c482be78aa3290d7ddd4f5da05c0767455b80a9696608a"} Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.916445 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" event={"ID":"37817d7f-3ba8-4e24-844d-72a8860dd693","Type":"ContainerStarted","Data":"f85d4dd746b4e47e8d6488e66e79ea12e148285a5617e099de2f041a10516584"} Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.916833 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" event={"ID":"37817d7f-3ba8-4e24-844d-72a8860dd693","Type":"ContainerStarted","Data":"46b98acd84a415bf1ca09e880e62a2e63a45195819705b4a959ad1e373f39826"} Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.917616 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws7fp\" (UniqueName: \"kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp\") pod \"console-f9d7485db-ml2tj\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.944164 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945110 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/9306ec08-b511-460b-be5d-a7f698672ffe-serving-cert\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945149 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945171 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-key\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945215 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-772n7\" (UniqueName: \"kubernetes.io/projected/1f021cde-f007-411e-95c6-6be5f2bc10a4-kube-api-access-772n7\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945233 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmptb\" (UniqueName: \"kubernetes.io/projected/13260ddb-8d39-4a5c-bc14-15838dc93ff1-kube-api-access-cmptb\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945291 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pztxl\" (UniqueName: \"kubernetes.io/projected/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-kube-api-access-pztxl\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945524 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-profile-collector-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945544 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6m9p\" (UniqueName: \"kubernetes.io/projected/89121f81-0dec-49f3-adf1-141069760008-kube-api-access-h6m9p\") pod \"migrator-59844c95c7-h5ptp\" (UID: \"89121f81-0dec-49f3-adf1-141069760008\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945569 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-registration-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945587 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-webhook-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945605 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j424z\" (UniqueName: \"kubernetes.io/projected/2dc1745c-b329-40b4-8f42-e44e563f452f-kube-api-access-j424z\") pod \"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945626 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-stats-auth\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945644 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-plugins-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945662 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/207d72d8-7daf-4223-9b7a-25c4edfdb490-service-ca-bundle\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945681 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9eddc646-c3eb-4276-b359-f2c372c16827-proxy-tls\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945699 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-apiservice-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945726 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s7fv\" (UniqueName: \"kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc 
kubenswrapper[4664]: I1013 06:48:54.945744 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1f021cde-f007-411e-95c6-6be5f2bc10a4-metrics-tls\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945766 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945785 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-auth-proxy-config\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945810 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945847 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7ssn\" (UniqueName: \"kubernetes.io/projected/1d87b0ff-e00c-4918-b7f9-a08a425a5012-kube-api-access-q7ssn\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945865 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq7qv\" (UniqueName: \"kubernetes.io/projected/9eddc646-c3eb-4276-b359-f2c372c16827-kube-api-access-nq7qv\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945890 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8969bc0-07ec-44a0-98fd-03669c3557db-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945911 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swtcs\" (UniqueName: \"kubernetes.io/projected/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-kube-api-access-swtcs\") pod \"ingress-canary-2chwr\" (UID: \"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945937 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cxdqh\" (UniqueName: \"kubernetes.io/projected/8051931c-c553-49fb-82bc-f584e6a34ff2-kube-api-access-cxdqh\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: \"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945968 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx675\" (UniqueName: \"kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.945999 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b78fa09-bd0f-4128-8652-09d27bad2427-config\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946041 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8051931c-c553-49fb-82bc-f584e6a34ff2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: \"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946074 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8969bc0-07ec-44a0-98fd-03669c3557db-config\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946092 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-socket-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946110 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qthgz\" (UniqueName: \"kubernetes.io/projected/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-kube-api-access-qthgz\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946127 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dbhn\" (UniqueName: \"kubernetes.io/projected/9306ec08-b511-460b-be5d-a7f698672ffe-kube-api-access-7dbhn\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946145 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ksw8\" (UniqueName: 
\"kubernetes.io/projected/9acbdb51-299c-4f92-9dbc-72cc388b9985-kube-api-access-4ksw8\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946173 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-metrics-certs\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946191 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr694\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-kube-api-access-pr694\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946210 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9306ec08-b511-460b-be5d-a7f698672ffe-config\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946231 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946279 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42l5b\" (UniqueName: \"kubernetes.io/projected/55e9e787-3f8b-4a88-8693-7e0b265b4724-kube-api-access-42l5b\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946307 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2dc1745c-b329-40b4-8f42-e44e563f452f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946333 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-cabundle\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946359 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6b78fa09-bd0f-4128-8652-09d27bad2427-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: 
\"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946383 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b78fa09-bd0f-4128-8652-09d27bad2427-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946401 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-srv-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946419 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a8757a02-6c3c-4f30-9ede-32cc55c8b616-metrics-tls\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946442 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prwd9\" (UniqueName: \"kubernetes.io/projected/425e3312-93c3-42cc-a840-a6cbb635e244-kube-api-access-prwd9\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946466 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-images\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946484 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kc82s\" (UniqueName: \"kubernetes.io/projected/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-kube-api-access-kc82s\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946501 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946518 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946537 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9acbdb51-299c-4f92-9dbc-72cc388b9985-proxy-tls\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946554 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c8969bc0-07ec-44a0-98fd-03669c3557db-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946578 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-cert\") pod \"ingress-canary-2chwr\" (UID: \"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946597 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q89hc\" (UniqueName: \"kubernetes.io/projected/207d72d8-7daf-4223-9b7a-25c4edfdb490-kube-api-access-q89hc\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946616 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946637 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a8757a02-6c3c-4f30-9ede-32cc55c8b616-trusted-ca\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946656 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-node-bootstrap-token\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.946674 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.948943 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-csi-data-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.948989 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1f021cde-f007-411e-95c6-6be5f2bc10a4-config-volume\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949025 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stdqf\" (UniqueName: \"kubernetes.io/projected/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-kube-api-access-stdqf\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949052 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/425e3312-93c3-42cc-a840-a6cbb635e244-tmpfs\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949079 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-certs\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949101 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-mountpoint-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949123 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949144 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-srv-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949167 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-default-certificate\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949187 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9acbdb51-299c-4f92-9dbc-72cc388b9985-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.949979 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b78fa09-bd0f-4128-8652-09d27bad2427-config\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z"
Oct 13 06:48:54 crc kubenswrapper[4664]: E1013 06:48:54.950143 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.450118751 +0000 UTC m=+143.137564143 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.944482 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" event={"ID":"3881c61c-1e8b-437c-85b1-6bade9f8f4f9","Type":"ContainerStarted","Data":"d4f5cc79a921412cfcf9ebb4a4fe3f0287f73e4236761e5ded2b777ac2085f5c"}
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.950177 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9acbdb51-299c-4f92-9dbc-72cc388b9985-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.950202 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" event={"ID":"3881c61c-1e8b-437c-85b1-6bade9f8f4f9","Type":"ContainerStarted","Data":"64371e2fda8386791d7d21778c47e411b2ec2ad389be7ea410732e287c1c8beb"}
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.950229 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" event={"ID":"3881c61c-1e8b-437c-85b1-6bade9f8f4f9","Type":"ContainerStarted","Data":"d35030fe31239ef8687585879c80991d7c9b60e384bd1769e72957e54d16f2c6"}
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.951460 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/207d72d8-7daf-4223-9b7a-25c4edfdb490-service-ca-bundle\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9"
Oct 13 06:48:54 crc kubenswrapper[4664]: I1013
06:48:54.975107 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8051931c-c553-49fb-82bc-f584e6a34ff2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: \"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.975650 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.981614 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-registration-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:54 crc kubenswrapper[4664]: I1013 06:48:54.987226 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a8757a02-6c3c-4f30-9ede-32cc55c8b616-trusted-ca\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.991693 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-plugins-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.993008 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-stats-auth\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.993946 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-webhook-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.995309 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7kmv\" (UniqueName: \"kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv\") pod \"oauth-openshift-558db77b4-47mrj\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.995766 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-profile-collector-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:54.995932 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" event={"ID":"55a00b0d-9ade-48a5-963f-fd17e913ef4b","Type":"ContainerStarted","Data":"8af30ff7838926981495e51864f0c633a957ec2e7e2936634dbaf5f0ef3deb05"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.042109 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" event={"ID":"c43c15db-46da-4cc0-b0fe-0ffaee273be6","Type":"ContainerStarted","Data":"45ba609a01840eb747ef8dec94d95b42a4ccc68b173c07f4471e9766f00bc397"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.042153 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" event={"ID":"c43c15db-46da-4cc0-b0fe-0ffaee273be6","Type":"ContainerStarted","Data":"c82276f3cb1d1b97bb23ce35e792a312d53ee5c758a9ad326684bf87520c821d"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.042166 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerDied","Data":"98287c1c1374839bf9696f4ba0c47a64f1973256eadabacd5b390f6f1d238550"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.042215 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.006431 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-node-bootstrap-token\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.007420 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/425e3312-93c3-42cc-a840-a6cbb635e244-tmpfs\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.011466 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9eddc646-c3eb-4276-b359-f2c372c16827-proxy-tls\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.015342 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-mountpoint-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.020578 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-default-certificate\") pod 
\"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.022041 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13260ddb-8d39-4a5c-bc14-15838dc93ff1-certs\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.022838 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-auth-proxy-config\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.023146 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-cabundle\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.023565 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/425e3312-93c3-42cc-a840-a6cbb635e244-apiservice-cert\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.024393 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.024407 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8969bc0-07ec-44a0-98fd-03669c3557db-config\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.024464 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-socket-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.031648 4664 generic.go:334] "Generic (PLEG): container finished" podID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerID="98287c1c1374839bf9696f4ba0c47a64f1973256eadabacd5b390f6f1d238550" exitCode=0 Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.001564 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-cert\") pod \"ingress-canary-2chwr\" (UID: 
\"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.038553 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b78fa09-bd0f-4128-8652-09d27bad2427-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.003419 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.004893 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/55e9e787-3f8b-4a88-8693-7e0b265b4724-csi-data-dir\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.005618 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1f021cde-f007-411e-95c6-6be5f2bc10a4-config-volume\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.038234 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lprqh\" (UniqueName: \"kubernetes.io/projected/821e4208-46ce-483c-a06f-83d5e8d74cf0-kube-api-access-lprqh\") pod \"openshift-apiserver-operator-796bbdcf4f-xfkng\" (UID: \"821e4208-46ce-483c-a06f-83d5e8d74cf0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.046092 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8969bc0-07ec-44a0-98fd-03669c3557db-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.050719 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nq77\" (UniqueName: \"kubernetes.io/projected/e5fdb8ec-d75b-4d58-9f56-1952df83cc29-kube-api-access-4nq77\") pod \"dns-operator-744455d44c-zjsrt\" (UID: \"e5fdb8ec-d75b-4d58-9f56-1952df83cc29\") " pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.052843 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1f021cde-f007-411e-95c6-6be5f2bc10a4-metrics-tls\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.053621 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/2dc1745c-b329-40b4-8f42-e44e563f452f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.056397 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.057040 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-signing-key\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.057142 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.057723 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9306ec08-b511-460b-be5d-a7f698672ffe-serving-cert\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.066355 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1d87b0ff-e00c-4918-b7f9-a08a425a5012-srv-cert\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.076477 4664 patch_prober.go:28] interesting pod/downloads-7954f5f757-dh2vl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.076554 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dh2vl" podUID="d981b25e-c830-4f47-9851-d51db10ed5bf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.081184 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9acbdb51-299c-4f92-9dbc-72cc388b9985-proxy-tls\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.091363 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tc9f\" 
(UniqueName: \"kubernetes.io/projected/a3bc8d71-befb-4d03-9aa9-9ede609c04cb-kube-api-access-6tc9f\") pod \"openshift-controller-manager-operator-756b6f6bc6-tsfsd\" (UID: \"a3bc8d71-befb-4d03-9aa9-9ede609c04cb\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.092386 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9eddc646-c3eb-4276-b359-f2c372c16827-images\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.094429 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9306ec08-b511-460b-be5d-a7f698672ffe-config\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.099308 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.102155 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.103081 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.103400 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.603377328 +0000 UTC m=+143.290822520 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.104422 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/207d72d8-7daf-4223-9b7a-25c4edfdb490-metrics-certs\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.106073 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.106128 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-srv-cert\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.106613 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.106703 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j424z\" (UniqueName: \"kubernetes.io/projected/2dc1745c-b329-40b4-8f42-e44e563f452f-kube-api-access-j424z\") pod \"multus-admission-controller-857f4d67dd-t4v67\" (UID: \"2dc1745c-b329-40b4-8f42-e44e563f452f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.143765 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pztxl\" (UniqueName: \"kubernetes.io/projected/3344b8d9-b31e-43f7-81fb-1ebc893f5a90-kube-api-access-pztxl\") pod \"kube-storage-version-migrator-operator-b67b599dd-zl55w\" (UID: \"3344b8d9-b31e-43f7-81fb-1ebc893f5a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.144490 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a8757a02-6c3c-4f30-9ede-32cc55c8b616-metrics-tls\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.145341 
4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" event={"ID":"af35da51-424c-475c-8306-87c51913a3f9","Type":"ContainerStarted","Data":"882dc2b081b39bcc031e494441c4526b52d68bab729482dc8596a6ab2aa84ebd"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.145643 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" event={"ID":"af35da51-424c-475c-8306-87c51913a3f9","Type":"ContainerStarted","Data":"8585befce2757103b366cbddcff85bac86fd89f0b59db967d2dcd262095a793c"} Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.145847 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.146228 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.147228 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6m9p\" (UniqueName: \"kubernetes.io/projected/89121f81-0dec-49f3-adf1-141069760008-kube-api-access-h6m9p\") pod \"migrator-59844c95c7-h5ptp\" (UID: \"89121f81-0dec-49f3-adf1-141069760008\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.156435 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.158683 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmptb\" (UniqueName: \"kubernetes.io/projected/13260ddb-8d39-4a5c-bc14-15838dc93ff1-kube-api-access-cmptb\") pod \"machine-config-server-z4pxt\" (UID: \"13260ddb-8d39-4a5c-bc14-15838dc93ff1\") " pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.178228 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stdqf\" (UniqueName: \"kubernetes.io/projected/bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c-kube-api-access-stdqf\") pod \"service-ca-9c57cc56f-vpv87\" (UID: \"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c\") " pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.179580 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42l5b\" (UniqueName: \"kubernetes.io/projected/55e9e787-3f8b-4a88-8693-7e0b265b4724-kube-api-access-42l5b\") pod \"csi-hostpathplugin-v4pkt\" (UID: \"55e9e787-3f8b-4a88-8693-7e0b265b4724\") " pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.189678 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6bz9t"] Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.198178 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q89hc\" (UniqueName: \"kubernetes.io/projected/207d72d8-7daf-4223-9b7a-25c4edfdb490-kube-api-access-q89hc\") pod \"router-default-5444994796-4bxj9\" (UID: \"207d72d8-7daf-4223-9b7a-25c4edfdb490\") " pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.203642 4664 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.206303 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.706270863 +0000 UTC m=+143.393716055 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.219590 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.220338 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx675\" (UniqueName: \"kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675\") pod \"collect-profiles-29338965-nsh49\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.243461 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-772n7\" (UniqueName: \"kubernetes.io/projected/1f021cde-f007-411e-95c6-6be5f2bc10a4-kube-api-access-772n7\") pod \"dns-default-lfw8h\" (UID: \"1f021cde-f007-411e-95c6-6be5f2bc10a4\") " pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.258978 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.273811 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.276756 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s7fv\" (UniqueName: \"kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv\") pod \"marketplace-operator-79b997595-p6fvb\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.279783 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.288500 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vkt4"] Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.296529 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq7qv\" (UniqueName: \"kubernetes.io/projected/9eddc646-c3eb-4276-b359-f2c372c16827-kube-api-access-nq7qv\") pod \"machine-config-operator-74547568cd-czxhw\" (UID: \"9eddc646-c3eb-4276-b359-f2c372c16827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.306536 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.306701 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.307340 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.807321877 +0000 UTC m=+143.494767069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.309907 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.317791 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv"] Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.321389 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.327639 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") pod \"controller-manager-879f6c89f-f66qq\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.327659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxdqh\" (UniqueName: \"kubernetes.io/projected/8051931c-c553-49fb-82bc-f584e6a34ff2-kube-api-access-cxdqh\") pod \"package-server-manager-789f6589d5-mb6jf\" (UID: \"8051931c-c553-49fb-82bc-f584e6a34ff2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.331351 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.343804 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swtcs\" (UniqueName: \"kubernetes.io/projected/b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d-kube-api-access-swtcs\") pod \"ingress-canary-2chwr\" (UID: \"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d\") " pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.351583 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7ssn\" (UniqueName: \"kubernetes.io/projected/1d87b0ff-e00c-4918-b7f9-a08a425a5012-kube-api-access-q7ssn\") pod \"olm-operator-6b444d44fb-8jkvs\" (UID: \"1d87b0ff-e00c-4918-b7f9-a08a425a5012\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.353419 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" Oct 13 06:48:55 crc kubenswrapper[4664]: W1013 06:48:55.354026 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf40d27a_b0f2_4169_ad05_5ff9e4d46595.slice/crio-a27269bbc5bf80ef0bc22d0098991b8a6ce6f8f9dc508e49413ea69b50e78bee WatchSource:0}: Error finding container a27269bbc5bf80ef0bc22d0098991b8a6ce6f8f9dc508e49413ea69b50e78bee: Status 404 returned error can't find the container with id a27269bbc5bf80ef0bc22d0098991b8a6ce6f8f9dc508e49413ea69b50e78bee Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.355481 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.366047 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-z4pxt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.368093 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lfw8h" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.368262 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.383229 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.395972 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2chwr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.396274 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6b78fa09-bd0f-4128-8652-09d27bad2427-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-49z5z\" (UID: \"6b78fa09-bd0f-4128-8652-09d27bad2427\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.396457 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.408947 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.409608 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:55.909580474 +0000 UTC m=+143.597025666 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.409768 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.419179 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dbhn\" (UniqueName: \"kubernetes.io/projected/9306ec08-b511-460b-be5d-a7f698672ffe-kube-api-access-7dbhn\") pod \"service-ca-operator-777779d784-f67l8\" (UID: \"9306ec08-b511-460b-be5d-a7f698672ffe\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.451187 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kc82s\" (UniqueName: \"kubernetes.io/projected/6beaedb4-8ff1-4956-a04c-b4007e2d1c50-kube-api-access-kc82s\") pod \"catalog-operator-68c6474976-pgcw9\" (UID: \"6beaedb4-8ff1-4956-a04c-b4007e2d1c50\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.471880 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c8969bc0-07ec-44a0-98fd-03669c3557db-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zh2dr\" (UID: \"c8969bc0-07ec-44a0-98fd-03669c3557db\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.480515 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prwd9\" (UniqueName: \"kubernetes.io/projected/425e3312-93c3-42cc-a840-a6cbb635e244-kube-api-access-prwd9\") pod \"packageserver-d55dfcdfc-jz47t\" (UID: \"425e3312-93c3-42cc-a840-a6cbb635e244\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.493918 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.508277 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.508874 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ksw8\" (UniqueName: \"kubernetes.io/projected/9acbdb51-299c-4f92-9dbc-72cc388b9985-kube-api-access-4ksw8\") pod \"machine-config-controller-84d6567774-62tk8\" (UID: \"9acbdb51-299c-4f92-9dbc-72cc388b9985\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.511319 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.511783 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.011766819 +0000 UTC m=+143.699212001 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.535369 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr694\" (UniqueName: \"kubernetes.io/projected/a8757a02-6c3c-4f30-9ede-32cc55c8b616-kube-api-access-pr694\") pod \"ingress-operator-5b745b69d9-v46hd\" (UID: \"a8757a02-6c3c-4f30-9ede-32cc55c8b616\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.539230 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qthgz\" (UniqueName: \"kubernetes.io/projected/e67f1677-e72c-4ee7-ae5e-b1d80a6597fe-kube-api-access-qthgz\") pod \"control-plane-machine-set-operator-78cbb6b69f-jv45s\" (UID: \"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.548721 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.566200 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.570865 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.586946 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.589336 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.612773 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.613016 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.112983157 +0000 UTC m=+143.800428349 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.613102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.613618 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.113598905 +0000 UTC m=+143.801044097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.614281 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.626669 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.653666 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.728228 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.728599 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.228557778 +0000 UTC m=+143.916002970 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.730301 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.730739 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.230729228 +0000 UTC m=+143.918174420 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.788034 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.831758 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.832230 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.332205224 +0000 UTC m=+144.019650416 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.839806 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" Oct 13 06:48:55 crc kubenswrapper[4664]: I1013 06:48:55.939532 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:55 crc kubenswrapper[4664]: E1013 06:48:55.939965 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.439951655 +0000 UTC m=+144.127396847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.011420 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w"] Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.040929 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.041298 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.541262345 +0000 UTC m=+144.228707537 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.066240 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.079972 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.087292 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gkm66"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.146375 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.147775 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.647750841 +0000 UTC m=+144.335196023 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.172363 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" event={"ID":"39908d1a-79af-485a-8deb-43f03552b3d1","Type":"ContainerStarted","Data":"74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.172614 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" event={"ID":"39908d1a-79af-485a-8deb-43f03552b3d1","Type":"ContainerStarted","Data":"da270a2e3e6c34d79c830a317c17eb0a0e5913f2e6adc936c1e1f8da6312da05"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.174253 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.190514 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" event={"ID":"cf40d27a-b0f2-4169-ad05-5ff9e4d46595","Type":"ContainerStarted","Data":"a27269bbc5bf80ef0bc22d0098991b8a6ce6f8f9dc508e49413ea69b50e78bee"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.190945 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.191016 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.197112 4664 generic.go:334] "Generic (PLEG): container finished" podID="fa3d6ad9-c819-4a98-9ded-9371e6259f9b" containerID="6bd7cb135c31fce86ae988bcefff224f5dcc86b153d6b63881a846f52dbb8410" exitCode=0
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.197202 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" event={"ID":"fa3d6ad9-c819-4a98-9ded-9371e6259f9b","Type":"ContainerDied","Data":"6bd7cb135c31fce86ae988bcefff224f5dcc86b153d6b63881a846f52dbb8410"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.211795 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.224912 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" event={"ID":"37817d7f-3ba8-4e24-844d-72a8860dd693","Type":"ContainerStarted","Data":"d172d67d53693a1e1549924d4501c0951528a76bca26494eea3e3f03b89610aa"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.232498 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" event={"ID":"3f93db1a-5cc4-491b-ac61-3e679b9e9686","Type":"ContainerStarted","Data":"171d8e643d3096b58da4813723f0d870edba0edb54291eb9602ef96e38ac8e75"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.248892 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.249310 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.749284609 +0000 UTC m=+144.436729801 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.254252 4664 generic.go:334] "Generic (PLEG): container finished" podID="55a00b0d-9ade-48a5-963f-fd17e913ef4b" containerID="b7af2549df2b67a3e9093ee056f03601cd99c5b560eb8ac6163a03580cc5fd66" exitCode=0
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.257653 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" event={"ID":"55a00b0d-9ade-48a5-963f-fd17e913ef4b","Type":"ContainerDied","Data":"b7af2549df2b67a3e9093ee056f03601cd99c5b560eb8ac6163a03580cc5fd66"}
Oct 13 06:48:56 crc kubenswrapper[4664]: W1013 06:48:56.276032 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3344b8d9_b31e_43f7_81fb_1ebc893f5a90.slice/crio-91bc23b930d83b047f10e952fcc7532623497dd2c95896ba9365a1927792fc78 WatchSource:0}: Error finding container 91bc23b930d83b047f10e952fcc7532623497dd2c95896ba9365a1927792fc78: Status 404 returned error can't find the container with id 91bc23b930d83b047f10e952fcc7532623497dd2c95896ba9365a1927792fc78
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.293134 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.321127 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" event={"ID":"b3022adf-c38f-450e-9f28-4581365f36e9","Type":"ContainerStarted","Data":"e259b184741e0c8f71617e1dfabbb967538ab5ba4f59cea02a443028d6c500a2"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.321167 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" event={"ID":"b3022adf-c38f-450e-9f28-4581365f36e9","Type":"ContainerStarted","Data":"c4091f20746a4916880ff4a793a233a6750782fd1d51a5b437c3c6b0c0b0680a"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.338297 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerStarted","Data":"36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9"}
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.338345 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.344041 4664 patch_prober.go:28] interesting pod/downloads-7954f5f757-dh2vl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body=
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.344118 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dh2vl" podUID="d981b25e-c830-4f47-9851-d51db10ed5bf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused"
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.351055 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.354014 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.853995034 +0000 UTC m=+144.541440226 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.368742 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vpv87"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.379827 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t4v67"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.452602 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.454687 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:56.954650236 +0000 UTC m=+144.642095568 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.556293 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.556690 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.056676677 +0000 UTC m=+144.744121869 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.664642 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lfw8h"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.665085 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.665446 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.165433507 +0000 UTC m=+144.852878699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.783969 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.784866 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.284850865 +0000 UTC m=+144.972296057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.786424 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-dh2vl" podStartSLOduration=123.786411268 podStartE2EDuration="2m3.786411268s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:56.736079087 +0000 UTC m=+144.423524299" watchObservedRunningTime="2025-10-13 06:48:56.786411268 +0000 UTC m=+144.473856460"
Oct 13 06:48:56 crc kubenswrapper[4664]: W1013 06:48:56.807792 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f021cde_f007_411e_95c6_6be5f2bc10a4.slice/crio-963b1c9f25fa01569b72810448e2f9f670c300752376fa78e48764b68b88f583 WatchSource:0}: Error finding container 963b1c9f25fa01569b72810448e2f9f670c300752376fa78e48764b68b88f583: Status 404 returned error can't find the container with id 963b1c9f25fa01569b72810448e2f9f670c300752376fa78e48764b68b88f583
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.825758 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.867690 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.886313 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.887002 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.386967008 +0000 UTC m=+145.074412200 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.897212 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.937048 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-nhbkw" podStartSLOduration=123.937025752 podStartE2EDuration="2m3.937025752s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:56.931531338 +0000 UTC m=+144.618976530" watchObservedRunningTime="2025-10-13 06:48:56.937025752 +0000 UTC m=+144.624470944"
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.945320 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zjsrt"]
Oct 13 06:48:56 crc kubenswrapper[4664]: I1013 06:48:56.988022 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:56 crc kubenswrapper[4664]: E1013 06:48:56.989787 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.48976609 +0000 UTC m=+145.177211282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: W1013 06:48:57.076851 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bf1cd76_69a8_496f_a6a9_1adc0022d829.slice/crio-00b43b6962d46f49494882708c2859ea764b4b5d3680cb457d8c3428ef289abf WatchSource:0}: Error finding container 00b43b6962d46f49494882708c2859ea764b4b5d3680cb457d8c3428ef289abf: Status 404 returned error can't find the container with id 00b43b6962d46f49494882708c2859ea764b4b5d3680cb457d8c3428ef289abf
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.091395 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.092342 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.592299616 +0000 UTC m=+145.279744968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.172907 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dn8d4" podStartSLOduration=125.172873885 podStartE2EDuration="2m5.172873885s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.126374821 +0000 UTC m=+144.813820033" watchObservedRunningTime="2025-10-13 06:48:57.172873885 +0000 UTC m=+144.860319087"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.197246 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.197757 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.697741412 +0000 UTC m=+145.385186604 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.228355 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podStartSLOduration=125.22832967 podStartE2EDuration="2m5.22832967s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.225403798 +0000 UTC m=+144.912849020" watchObservedRunningTime="2025-10-13 06:48:57.22832967 +0000 UTC m=+144.915774862"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.300485 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.314928 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" podStartSLOduration=124.314899337 podStartE2EDuration="2m4.314899337s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.307276663 +0000 UTC m=+144.994721855" watchObservedRunningTime="2025-10-13 06:48:57.314899337 +0000 UTC m=+145.002344529"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.316282 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs"]
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.324294 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.824260379 +0000 UTC m=+145.511705571 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.412264 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"]
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.426776 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" event={"ID":"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c","Type":"ContainerStarted","Data":"0aad5a2b7a3cfbbe1608cb0b98082d33926fdf96323b7ecd53c5aca8f14cee19"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.428124 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.428669 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:57.928638396 +0000 UTC m=+145.616083588 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.488880 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" event={"ID":"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549","Type":"ContainerStarted","Data":"ee4a468b69facef1ec3e0d380bcef1731b62e6dff40b28674ecbd7a10191d098"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.529330 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4bxj9" event={"ID":"207d72d8-7daf-4223-9b7a-25c4edfdb490","Type":"ContainerStarted","Data":"8522fd25b9aa8520b792e97895a5c35c732eefc84442991a4eb96e46730c5d59"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.530105 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.530757 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.030724169 +0000 UTC m=+145.718169361 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.543963 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-468sf" podStartSLOduration=125.543928949 podStartE2EDuration="2m5.543928949s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.528516776 +0000 UTC m=+145.215961988" watchObservedRunningTime="2025-10-13 06:48:57.543928949 +0000 UTC m=+145.231374141"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.565608 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podStartSLOduration=125.565575806 podStartE2EDuration="2m5.565575806s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.563509368 +0000 UTC m=+145.250954570" watchObservedRunningTime="2025-10-13 06:48:57.565575806 +0000 UTC m=+145.253020998"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.635001 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.640873 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.140840316 +0000 UTC m=+145.828285528 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.688155 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-z4pxt" event={"ID":"13260ddb-8d39-4a5c-bc14-15838dc93ff1","Type":"ContainerStarted","Data":"9185cfba81436f61399d04200da0a49bed709b6c37c9a992abb7ba97da452a2e"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.690055 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" event={"ID":"2dc1745c-b329-40b4-8f42-e44e563f452f","Type":"ContainerStarted","Data":"9167af96a6eb61bd0e928bae25eca1fedae1d0c14c0b1d138b414ffd3aee0e19"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.696941 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6w5q7" podStartSLOduration=124.696923668 podStartE2EDuration="2m4.696923668s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.688511772 +0000 UTC m=+145.375956964" watchObservedRunningTime="2025-10-13 06:48:57.696923668 +0000 UTC m=+145.384368870"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.738256 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.742483 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.242443785 +0000 UTC m=+145.929888977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.802337 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lfw8h" event={"ID":"1f021cde-f007-411e-95c6-6be5f2bc10a4","Type":"ContainerStarted","Data":"963b1c9f25fa01569b72810448e2f9f670c300752376fa78e48764b68b88f583"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.802704 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2chwr"]
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.819942 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" event={"ID":"5bf1cd76-69a8-496f-a6a9-1adc0022d829","Type":"ContainerStarted","Data":"00b43b6962d46f49494882708c2859ea764b4b5d3680cb457d8c3428ef289abf"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.838705 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"]
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.845011 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.845474 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.345457864 +0000 UTC m=+146.032903056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.845712 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" event={"ID":"89073c83-c4b0-460e-8011-433081541325","Type":"ContainerStarted","Data":"d69ac2381effc8e3a609def8a79eb8afde0e0720f28121e63883fd33af35d7b8"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.877761 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" event={"ID":"89121f81-0dec-49f3-adf1-141069760008","Type":"ContainerStarted","Data":"97a197b264511e2f03982681d3b0528253692817c3049fcfd272291d557daecf"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.933745 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" event={"ID":"3f93db1a-5cc4-491b-ac61-3e679b9e9686","Type":"ContainerStarted","Data":"da6375c76b457ff19704bd513805ed7c4a22b604a46fd7692653178bed4bd86d"}
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.952824 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:57 crc kubenswrapper[4664]: E1013 06:48:57.954870 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.454759548 +0000 UTC m=+146.142204740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.966175 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-2vkt4" podStartSLOduration=124.966150698 podStartE2EDuration="2m4.966150698s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:57.965046966 +0000 UTC m=+145.652492158" watchObservedRunningTime="2025-10-13 06:48:57.966150698 +0000 UTC m=+145.653595890"
Oct 13 06:48:57 crc kubenswrapper[4664]: I1013 06:48:57.994570 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" event={"ID":"3344b8d9-b31e-43f7-81fb-1ebc893f5a90","Type":"ContainerStarted","Data":"91bc23b930d83b047f10e952fcc7532623497dd2c95896ba9365a1927792fc78"}
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.047980 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" podStartSLOduration=125.047945131 podStartE2EDuration="2m5.047945131s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:58.03862383 +0000 UTC m=+145.726069032" watchObservedRunningTime="2025-10-13 06:48:58.047945131 +0000 UTC m=+145.735390323"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.051279 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f67l8"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.055737 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.056094 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.556081589 +0000 UTC m=+146.243526781 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.111221 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" event={"ID":"821e4208-46ce-483c-a06f-83d5e8d74cf0","Type":"ContainerStarted","Data":"f1fa4f95baaf571bc54548f6d678942d925f2d65dd98708ebd1512550b144ef1"}
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.113320 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9"]
Oct 13 06:48:58 crc kubenswrapper[4664]: W1013 06:48:58.144719 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb08976d8_a2e6_4a97_9f1c_f7ca4b5ec98d.slice/crio-bef4dd0f5761e647331079759bd5786598fe6feb709da17fcdf02b7f813484f9 WatchSource:0}: Error finding container bef4dd0f5761e647331079759bd5786598fe6feb709da17fcdf02b7f813484f9: Status 404 returned error can't find the container with id bef4dd0f5761e647331079759bd5786598fe6feb709da17fcdf02b7f813484f9
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.146147 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ml2tj" event={"ID":"fb673675-43af-441e-9cf3-f5f283ef9558","Type":"ContainerStarted","Data":"7d01b025ab7546e3bf762cbf9a56bc2b8b22cc01ae6ccd2aa2aaf7079ee08e4e"}
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.156321 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.156731 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.656712381 +0000 UTC m=+146.344157573 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.174881 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" event={"ID":"a3bc8d71-befb-4d03-9aa9-9ede609c04cb","Type":"ContainerStarted","Data":"9a2c1797194ca8d715fad1adad05a9092c6e83827728a567437e8734d4c108c4"}
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.177706 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-ml2tj" podStartSLOduration=125.177680779 podStartE2EDuration="2m5.177680779s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:58.176870405 +0000 UTC m=+145.864315607" watchObservedRunningTime="2025-10-13 06:48:58.177680779 +0000 UTC m=+145.865125971"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.179428 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" podStartSLOduration=125.179420097 podStartE2EDuration="2m5.179420097s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:58.144983911 +0000 UTC m=+145.832429103" watchObservedRunningTime="2025-10-13 06:48:58.179420097 +0000 UTC m=+145.866865289"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.188044 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.248571 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" podStartSLOduration=125.248544915 podStartE2EDuration="2m5.248544915s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:58.23551338 +0000 UTC m=+145.922958572" watchObservedRunningTime="2025-10-13 06:48:58.248544915 +0000 UTC m=+145.935990107"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.251748 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.267034 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.280739 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.780709997 +0000 UTC m=+146.468155189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.302149 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.314331 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-v4pkt"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.325494 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.336051 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.369134 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.372447 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.373169 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.373910 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.87388188 +0000 UTC m=+146.561327072 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.397615 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s"]
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.478532 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.479116 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:58.978965556 +0000 UTC m=+146.666410738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.500416 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.580628 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.580760 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.08072183 +0000 UTC m=+146.768167022 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.581134 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.581690 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.081670676 +0000 UTC m=+146.769115868 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.686533 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.686897 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.186842714 +0000 UTC m=+146.874287906 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.687310 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.687926 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.187899584 +0000 UTC m=+146.875344776 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.792767 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.793097 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.293043192 +0000 UTC m=+146.980488384 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.793203 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.793773 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.293754543 +0000 UTC m=+146.981199735 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.811998 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.812058 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.896778 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.897400 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.397370708 +0000 UTC m=+147.084815910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:58 crc kubenswrapper[4664]: I1013 06:48:58.898111 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:58 crc kubenswrapper[4664]: E1013 06:48:58.898546 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.398536411 +0000 UTC m=+147.085981603 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.000693 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.001068 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.501053885 +0000 UTC m=+147.188499077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.102442 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.103253 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.603240981 +0000 UTC m=+147.290686173 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.205991 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.206376 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.706356922 +0000 UTC m=+147.393802114 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.244894 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" event={"ID":"5bf1cd76-69a8-496f-a6a9-1adc0022d829","Type":"ContainerStarted","Data":"7afcbc51532b7e7ccdcc08045df6b3107a6beb1907967517e9a12d3ce8a3175e"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.245175 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.249909 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-p6fvb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.249979 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.261535 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" event={"ID":"c8969bc0-07ec-44a0-98fd-03669c3557db","Type":"ContainerStarted","Data":"1f1135c2b4c1dd84813bb88cd5d85a54998ed6c7c84565bc685be4fa6a8653ac"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.272604 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" event={"ID":"9eddc646-c3eb-4276-b359-f2c372c16827","Type":"ContainerStarted","Data":"c54bb2d0500ba6fd19a53963d702b7b4109f695fd459c89293159febc9d6498c"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.274204 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" event={"ID":"79847ce1-e701-447b-b9d1-a0609b0b09ab","Type":"ContainerStarted","Data":"95f8386c83ee41f76f1e1264d05f41a4ba3da19f7c4a427be4326855e1a96698"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.274289 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" event={"ID":"79847ce1-e701-447b-b9d1-a0609b0b09ab","Type":"ContainerStarted","Data":"65a89cbad5c28685f2087e9391eba2f2639d32927fea892c13e276d95a208c97"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.274798 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.281626 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" 
podStartSLOduration=126.281610932 podStartE2EDuration="2m6.281610932s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.280935123 +0000 UTC m=+146.968380335" watchObservedRunningTime="2025-10-13 06:48:59.281610932 +0000 UTC m=+146.969056124" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.283525 4664 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-f66qq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.283567 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.285733 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" event={"ID":"425e3312-93c3-42cc-a840-a6cbb635e244","Type":"ContainerStarted","Data":"8b9d7c6ede3c9f0b22bba018ae134acfb951e8df33df13a0eb3c845ac4cc0a2f"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.285795 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" event={"ID":"425e3312-93c3-42cc-a840-a6cbb635e244","Type":"ContainerStarted","Data":"0e5b17b714730948d005e3f11561a6da0d7c2213f333270f231ee440ba6b5fc3"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.286230 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.288130 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" event={"ID":"55e9e787-3f8b-4a88-8693-7e0b265b4724","Type":"ContainerStarted","Data":"4f9b62ce0f0fce57c0082413abcba84fcea6e0a5c9d766ed246f47b723d1157d"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.291311 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body= Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.291391 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.305422 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lfw8h" event={"ID":"1f021cde-f007-411e-95c6-6be5f2bc10a4","Type":"ContainerStarted","Data":"0f47c12e7c6fe42531d2d3ab4884d99b6f3973b6418a4102fbc43ce8f2317bf1"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.307685 4664 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" podStartSLOduration=126.307668552 podStartE2EDuration="2m6.307668552s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.306070928 +0000 UTC m=+146.993516120" watchObservedRunningTime="2025-10-13 06:48:59.307668552 +0000 UTC m=+146.995113734" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.313841 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.315394 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.815369638 +0000 UTC m=+147.502814830 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.340739 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" event={"ID":"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549","Type":"ContainerStarted","Data":"9e2011f46eb121206644b4823cd39cf24fb0aee818323f1623797e3a07f99827"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.350689 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tsfsd" event={"ID":"a3bc8d71-befb-4d03-9aa9-9ede609c04cb","Type":"ContainerStarted","Data":"837d7624a586198ee6db56b513b1872c339459b9d5fcb1256896835a127b0b38"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.352493 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podStartSLOduration=126.352469009 podStartE2EDuration="2m6.352469009s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.351544032 +0000 UTC m=+147.038989224" watchObservedRunningTime="2025-10-13 06:48:59.352469009 +0000 UTC m=+147.039914201" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.376227 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" event={"ID":"9acbdb51-299c-4f92-9dbc-72cc388b9985","Type":"ContainerStarted","Data":"9fbdd542d7619601266b4fe75aea8d3dc500b3288ec55378a84d09fd991f2fb6"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.400302 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" event={"ID":"8051931c-c553-49fb-82bc-f584e6a34ff2","Type":"ContainerStarted","Data":"f108683221121d66c00459d28fc57e4f5fa172e97e3d95e2836246b3e84ae5c8"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.402176 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podStartSLOduration=126.402164072 podStartE2EDuration="2m6.402164072s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.401692309 +0000 UTC m=+147.089137501" watchObservedRunningTime="2025-10-13 06:48:59.402164072 +0000 UTC m=+147.089609264" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.418745 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.423012 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:48:59.922982885 +0000 UTC m=+147.610428077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.439197 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zl55w" event={"ID":"3344b8d9-b31e-43f7-81fb-1ebc893f5a90","Type":"ContainerStarted","Data":"7004589a94eafb06378a5593ef9a35514d8b7234a38c63cf550e65b6b083ec38"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.450250 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" event={"ID":"89073c83-c4b0-460e-8011-433081541325","Type":"ContainerStarted","Data":"894ab5deb891e5dfa91d2b388b8ac0e959f15d17893bf720e30f7646bb590257"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.478166 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" event={"ID":"2dc1745c-b329-40b4-8f42-e44e563f452f","Type":"ContainerStarted","Data":"6f2d9a5df2499f5f207bfa031ea5d0aebb284715acacd0b16560512be20fb229"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.493003 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4bxj9" event={"ID":"207d72d8-7daf-4223-9b7a-25c4edfdb490","Type":"ContainerStarted","Data":"6d583ad9075204a43d3edc2da689d253133491dc76b278ac8b780271c9ccb87e"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.500138 4664 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.505332 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2chwr" event={"ID":"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d","Type":"ContainerStarted","Data":"bef4dd0f5761e647331079759bd5786598fe6feb709da17fcdf02b7f813484f9"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.514444 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:48:59 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:48:59 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:48:59 crc kubenswrapper[4664]: healthz check failed Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.514517 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.536625 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.538338 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.03832247 +0000 UTC m=+147.725767842 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.560797 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" event={"ID":"fa3d6ad9-c819-4a98-9ded-9371e6259f9b","Type":"ContainerStarted","Data":"abc1af3d43429ad3e07a97c1108875263a08c20705638a0e2f1e3efb7c823604"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.573116 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-4bxj9" podStartSLOduration=126.573090054 podStartE2EDuration="2m6.573090054s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.572787876 +0000 UTC m=+147.260233078" watchObservedRunningTime="2025-10-13 06:48:59.573090054 +0000 UTC m=+147.260535246" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.576767 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" podStartSLOduration=126.576755337 podStartE2EDuration="2m6.576755337s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.498667918 +0000 UTC m=+147.186113110" watchObservedRunningTime="2025-10-13 06:48:59.576755337 +0000 UTC m=+147.264200539" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.581256 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" event={"ID":"6b78fa09-bd0f-4128-8652-09d27bad2427","Type":"ContainerStarted","Data":"db3ac5f86c04005afe1b457e55932bc882902e3c0a7a56b9083d7005555f8809"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.638288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.638477 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.138441007 +0000 UTC m=+147.825886199 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.638755 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.642205 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.142188052 +0000 UTC m=+147.829633244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.662263 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" event={"ID":"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8","Type":"ContainerStarted","Data":"ba4e883aaaf164e6d6f0ccdbc17bcb7f50b769863a5e418b2e6fa7fa7d2cc38b"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.718457 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" event={"ID":"bd7bda5a-9384-4f1e-a903-ca0d40bf7c2c","Type":"ContainerStarted","Data":"066354cd0761bedb688c17cddb46888f7eead4f26a179eafd0e4d7d39b29352b"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.739596 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.739958 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.239921903 +0000 UTC m=+147.927367095 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.740017 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.740533 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.240503889 +0000 UTC m=+147.927949081 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.754057 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" event={"ID":"6beaedb4-8ff1-4956-a04c-b4007e2d1c50","Type":"ContainerStarted","Data":"224ff3c0bee9d10f6f2ede628df0aaef3ac7b027a4622c3a3ff6e33ea61c70e0"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.754106 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" event={"ID":"6beaedb4-8ff1-4956-a04c-b4007e2d1c50","Type":"ContainerStarted","Data":"4b1337af47c42d209d75a549c0bcd75e9b0702e214dd23f207909676a43180da"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.754126 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.754178 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vpv87" podStartSLOduration=125.754156942 podStartE2EDuration="2m5.754156942s" podCreationTimestamp="2025-10-13 06:46:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.753427331 +0000 UTC m=+147.440872523" watchObservedRunningTime="2025-10-13 06:48:59.754156942 +0000 UTC m=+147.441602124" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.755946 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" 
start-of-body= Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.756008 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.790721 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podStartSLOduration=126.790699376 podStartE2EDuration="2m6.790699376s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.788834624 +0000 UTC m=+147.476279816" watchObservedRunningTime="2025-10-13 06:48:59.790699376 +0000 UTC m=+147.478144568" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.805732 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" event={"ID":"55a00b0d-9ade-48a5-963f-fd17e913ef4b","Type":"ContainerStarted","Data":"6477321bb824d688f5297d98bf0dcf6b6988002fc70bd3c2170382c30df4952b"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.809422 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" event={"ID":"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe","Type":"ContainerStarted","Data":"ad0086382c8bda967a4682c23e25836786f80d51984b16a8383078c2642bdef6"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.813131 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" event={"ID":"e5fdb8ec-d75b-4d58-9f56-1952df83cc29","Type":"ContainerStarted","Data":"0a5342c1ff49230602500441de7b3d2ef0a3a1e4bc4dad2f10562437df50fabb"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.815204 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xfkng" event={"ID":"821e4208-46ce-483c-a06f-83d5e8d74cf0","Type":"ContainerStarted","Data":"f4f96037f174c13a61ce179f1519c7e5c28a86a39158d7586b4aaaad4b41d9ec"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.822338 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ml2tj" event={"ID":"fb673675-43af-441e-9cf3-f5f283ef9558","Type":"ContainerStarted","Data":"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.846463 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.847928 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" podStartSLOduration=126.84790757 podStartE2EDuration="2m6.84790757s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 
06:48:59.844684329 +0000 UTC m=+147.532129521" watchObservedRunningTime="2025-10-13 06:48:59.84790757 +0000 UTC m=+147.535352762" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.848408 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.348376083 +0000 UTC m=+148.035821275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.852555 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" event={"ID":"1d87b0ff-e00c-4918-b7f9-a08a425a5012","Type":"ContainerStarted","Data":"bb6e870c2f92cba2078cb02652c31251db1d82452046d4d3512a8383635cfa18"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.852685 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" event={"ID":"1d87b0ff-e00c-4918-b7f9-a08a425a5012","Type":"ContainerStarted","Data":"d4addf6e8303c09b8df09e673682391277ac4b3d3108e25c99b24c6426c65e14"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.854089 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.857417 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.857565 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.876015 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-z4pxt" event={"ID":"13260ddb-8d39-4a5c-bc14-15838dc93ff1","Type":"ContainerStarted","Data":"bc317ec77f7de42a1dc8bf2369bf12a12d4e5241507077d58a32a8cc4de88e86"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.880094 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podStartSLOduration=126.880076442 podStartE2EDuration="2m6.880076442s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.878863008 +0000 UTC m=+147.566308200" watchObservedRunningTime="2025-10-13 06:48:59.880076442 +0000 UTC 
m=+147.567521624" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.921314 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" event={"ID":"a8757a02-6c3c-4f30-9ede-32cc55c8b616","Type":"ContainerStarted","Data":"a98139c43b06fce8e3c057b85c9d2396bf18d9b9b4b163590623e118f31c1dc7"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.933199 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-z4pxt" podStartSLOduration=7.933172681 podStartE2EDuration="7.933172681s" podCreationTimestamp="2025-10-13 06:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.931987998 +0000 UTC m=+147.619433200" watchObservedRunningTime="2025-10-13 06:48:59.933172681 +0000 UTC m=+147.620617873" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.934617 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" event={"ID":"cf40d27a-b0f2-4169-ad05-5ff9e4d46595","Type":"ContainerStarted","Data":"a95d1339d7176398f78cf8cda28f1d45860ce83e41d8bedbeda820253d9855de"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.953325 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:48:59 crc kubenswrapper[4664]: E1013 06:48:59.954350 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.454334125 +0000 UTC m=+148.141779317 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.963928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" event={"ID":"89121f81-0dec-49f3-adf1-141069760008","Type":"ContainerStarted","Data":"5d1f6ce62ddbd1719cfe6e5b30c8bbf3a5d7f29649aae17f42bb6761c04be5bb"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.963993 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" event={"ID":"89121f81-0dec-49f3-adf1-141069760008","Type":"ContainerStarted","Data":"e1ed3f00316e4784d0eeff41be3e4477787bd25544c087bb588894ebecee3334"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.965645 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nnfdv" podStartSLOduration=126.965614941 podStartE2EDuration="2m6.965614941s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:48:59.963756648 +0000 UTC m=+147.651201840" watchObservedRunningTime="2025-10-13 06:48:59.965614941 +0000 UTC m=+147.653060133" Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.974331 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" event={"ID":"9306ec08-b511-460b-be5d-a7f698672ffe","Type":"ContainerStarted","Data":"86d42f3370f7f720552ad6ddd0e36d5f99d642d8548387c95b1da455dff542cf"} Oct 13 06:48:59 crc kubenswrapper[4664]: I1013 06:48:59.974385 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" event={"ID":"9306ec08-b511-460b-be5d-a7f698672ffe","Type":"ContainerStarted","Data":"83dbb3ae1a98a7045622375e57970d0be809aad2ab8c5914f5ecfde6e05029ad"} Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.057655 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.557635031 +0000 UTC m=+148.245080213 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.057684 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.058111 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.058417 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.558410903 +0000 UTC m=+148.245856095 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.110068 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h5ptp" podStartSLOduration=127.11004088 podStartE2EDuration="2m7.11004088s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:00.046069476 +0000 UTC m=+147.733514668" watchObservedRunningTime="2025-10-13 06:49:00.11004088 +0000 UTC m=+147.797486072" Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.159978 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.160292 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.660258208 +0000 UTC m=+148.347703400 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.160384 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.161495 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.661468333 +0000 UTC m=+148.348913525 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.264812 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.265005 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.764962055 +0000 UTC m=+148.452407247 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.265241 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.265885 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.76587858 +0000 UTC m=+148.453323772 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.369590 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.372200 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.87215057 +0000 UTC m=+148.559595762 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.473490 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.474116 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:00.974098938 +0000 UTC m=+148.661544130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.501001 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:00 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:00 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:00 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.501071 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.574497 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.574942 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.074922735 +0000 UTC m=+148.762367937 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.675962 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.677039 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.177025728 +0000 UTC m=+148.864470920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.777359 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.777827 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.277785403 +0000 UTC m=+148.965230595 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.879060 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.879421 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.379404772 +0000 UTC m=+149.066849964 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.979529 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:00 crc kubenswrapper[4664]: E1013 06:49:00.979817 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.479788487 +0000 UTC m=+149.167233679 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.980960 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" event={"ID":"e5fdb8ec-d75b-4d58-9f56-1952df83cc29","Type":"ContainerStarted","Data":"2372b871627698cb5bb0e6e5386e186fd82fc5372bec4b898bd892673fdabff5"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.981002 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" event={"ID":"e5fdb8ec-d75b-4d58-9f56-1952df83cc29","Type":"ContainerStarted","Data":"fe1e97d502c77af071f476baf5839fdf20314dcd15699f613ffe597070dc3c97"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.982950 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lfw8h" event={"ID":"1f021cde-f007-411e-95c6-6be5f2bc10a4","Type":"ContainerStarted","Data":"321e5998252414a5b6c27d869d68ec76731e63c35c6dc3e8215c5084f16af66c"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.983341 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-lfw8h"
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.985759 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" event={"ID":"55e9e787-3f8b-4a88-8693-7e0b265b4724","Type":"ContainerStarted","Data":"a100864a6809fe66b29f290a9b54ac5c89fdf9793407e9fb7417b01dc8202c99"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.988313 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" event={"ID":"9eddc646-c3eb-4276-b359-f2c372c16827","Type":"ContainerStarted","Data":"de8c068ff56b5f45efa5caecfa6575c1cd4bc120de99d737ed5a71b3bfe68973"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.988370 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" event={"ID":"9eddc646-c3eb-4276-b359-f2c372c16827","Type":"ContainerStarted","Data":"2d0e553a3d1f80a4e0fb1c74da0c62ce380b3953aca155e6aa786f9573763624"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.989927 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" event={"ID":"e67f1677-e72c-4ee7-ae5e-b1d80a6597fe","Type":"ContainerStarted","Data":"d50d4149f866c81aa96cdbae669a602429e7e3b71d4f0084e13e256de4a12c81"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.991644 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" event={"ID":"2dc1745c-b329-40b4-8f42-e44e563f452f","Type":"ContainerStarted","Data":"36e724049a6c6907fe01be8de777fefaa8280876a0d41c9a8de155b4755c3d9e"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.993070 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2chwr" event={"ID":"b08976d8-a2e6-4a97-9f1c-f7ca4b5ec98d","Type":"ContainerStarted","Data":"9caebe2808e8053295e208777da67b068966534556526d4d989e0d7e2242becc"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.994895 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" event={"ID":"9acbdb51-299c-4f92-9dbc-72cc388b9985","Type":"ContainerStarted","Data":"41405a3ca2c5290c4d12923ee67df6e9a8dc640e985c9d451a767e035dad9e09"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.994920 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" event={"ID":"9acbdb51-299c-4f92-9dbc-72cc388b9985","Type":"ContainerStarted","Data":"0d21da0a11110326aea63ac40d7cf8ab00a3ca457b68c585d673d77b7a4c963d"}
Oct 13 06:49:00 crc kubenswrapper[4664]: I1013 06:49:00.999349 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" event={"ID":"c8969bc0-07ec-44a0-98fd-03669c3557db","Type":"ContainerStarted","Data":"86d16c4ddd6de32e2ee9ba34767a7df547a35f0565bad0822cdc2b6d19e35fd8"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.001157 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" event={"ID":"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8","Type":"ContainerStarted","Data":"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.001745 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.003334 4664 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-47mrj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.29:6443/healthz\": dial tcp 10.217.0.29:6443: connect: connection refused" start-of-body=
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.003366 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.29:6443/healthz\": dial tcp 10.217.0.29:6443: connect: connection refused"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.004001 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" event={"ID":"8051931c-c553-49fb-82bc-f584e6a34ff2","Type":"ContainerStarted","Data":"f5986e0dbdd7199a29934a1593f49ee1ed1f2150ca7d23e8fbb5bb639faa38ff"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.004068 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" event={"ID":"8051931c-c553-49fb-82bc-f584e6a34ff2","Type":"ContainerStarted","Data":"242213a094086f75203f2ca5f9d92cad3686b2cc93221a61ad7660ca05774af4"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.004178 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.007103 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" event={"ID":"fa3d6ad9-c819-4a98-9ded-9371e6259f9b","Type":"ContainerStarted","Data":"c216c2121f73391f6005e79199a22b2f0a5b892a59eae88efeff4fd407cdb402"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.009238 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" event={"ID":"6b78fa09-bd0f-4128-8652-09d27bad2427","Type":"ContainerStarted","Data":"eb2d5bce6705d4e1d9e5e4366c190dc8c56a4669b486b3b904a54c7c3489d060"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.025310 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" event={"ID":"a8757a02-6c3c-4f30-9ede-32cc55c8b616","Type":"ContainerStarted","Data":"4ca02e841697b17331c60887104db67be117e14d8d3dfc3a183fd9fb6bf26e66"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.025415 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" event={"ID":"a8757a02-6c3c-4f30-9ede-32cc55c8b616","Type":"ContainerStarted","Data":"b397c5dcf0fd391adb2b595ec68a78821c4ced4891c2b3a0fe85b97c57d9f2e2"}
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.027615 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-p6fvb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.027668 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.039297 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f67l8" podStartSLOduration=128.039247245 podStartE2EDuration="2m8.039247245s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:00.115573995 +0000 UTC m=+147.803019197" watchObservedRunningTime="2025-10-13 06:49:01.039247245 +0000 UTC m=+148.726692447"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.039409 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.041435 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-zjsrt" podStartSLOduration=128.041429286 podStartE2EDuration="2m8.041429286s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.037862206 +0000 UTC m=+148.725307398" watchObservedRunningTime="2025-10-13 06:49:01.041429286 +0000 UTC m=+148.728874478"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.085173 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.087383 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.587360554 +0000 UTC m=+149.274805916 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.140248 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.189940 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.190575 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.690551847 +0000 UTC m=+149.377997039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.192446 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-lfw8h" podStartSLOduration=9.19242811 podStartE2EDuration="9.19242811s" podCreationTimestamp="2025-10-13 06:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.105317907 +0000 UTC m=+148.792763099" watchObservedRunningTime="2025-10-13 06:49:01.19242811 +0000 UTC m=+148.879873302"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.193295 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" podStartSLOduration=128.193290494 podStartE2EDuration="2m8.193290494s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.185826584 +0000 UTC m=+148.873271786" watchObservedRunningTime="2025-10-13 06:49:01.193290494 +0000 UTC m=+148.880735676"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.228671 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-czxhw" podStartSLOduration=128.228648585 podStartE2EDuration="2m8.228648585s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.22703081 +0000 UTC m=+148.914476002" watchObservedRunningTime="2025-10-13 06:49:01.228648585 +0000 UTC m=+148.916093777"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.234592 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.285922 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-2chwr" podStartSLOduration=9.285908361 podStartE2EDuration="9.285908361s" podCreationTimestamp="2025-10-13 06:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.285235951 +0000 UTC m=+148.972681153" watchObservedRunningTime="2025-10-13 06:49:01.285908361 +0000 UTC m=+148.973353553"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.295785 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.296192 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.796177758 +0000 UTC m=+149.483622950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.397014 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.397147 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.897121039 +0000 UTC m=+149.584566231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.397395 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.397964 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.897955203 +0000 UTC m=+149.585400395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.420654 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-62tk8" podStartSLOduration=128.420625718 podStartE2EDuration="2m8.420625718s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.372380796 +0000 UTC m=+149.059825988" watchObservedRunningTime="2025-10-13 06:49:01.420625718 +0000 UTC m=+149.108070910"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.461775 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v46hd" podStartSLOduration=128.461741981 podStartE2EDuration="2m8.461741981s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.422769119 +0000 UTC m=+149.110214311" watchObservedRunningTime="2025-10-13 06:49:01.461741981 +0000 UTC m=+149.149187193"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.461968 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zh2dr" podStartSLOduration=128.461961597 podStartE2EDuration="2m8.461961597s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.46135411 +0000 UTC m=+149.148799302" watchObservedRunningTime="2025-10-13 06:49:01.461961597 +0000 UTC m=+149.149406799"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.497961 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.498545 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:01.998520713 +0000 UTC m=+149.685965905 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.508953 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:01 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:01 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:01 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.509022 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.512053 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jv45s" podStartSLOduration=128.51202074 podStartE2EDuration="2m8.51202074s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.510332043 +0000 UTC m=+149.197777255" watchObservedRunningTime="2025-10-13 06:49:01.51202074 +0000 UTC m=+149.199465922"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.600880 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.601375 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.101360166 +0000 UTC m=+149.788805358 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.702368 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.702570 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.202534282 +0000 UTC m=+149.889979474 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.702883 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.703239 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.203224032 +0000 UTC m=+149.890669214 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.728481 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-49z5z" podStartSLOduration=128.728457709 podStartE2EDuration="2m8.728457709s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.630370799 +0000 UTC m=+149.317816011" watchObservedRunningTime="2025-10-13 06:49:01.728457709 +0000 UTC m=+149.415902901"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.728789 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" podStartSLOduration=128.728784668 podStartE2EDuration="2m8.728784668s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.728395368 +0000 UTC m=+149.415840570" watchObservedRunningTime="2025-10-13 06:49:01.728784668 +0000 UTC m=+149.416229860"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.804652 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.804877 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.304841571 +0000 UTC m=+149.992286763 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.805000 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.805415 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.305405637 +0000 UTC m=+149.992850829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.818344 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-t4v67" podStartSLOduration=128.818313609 podStartE2EDuration="2m8.818313609s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.811656332 +0000 UTC m=+149.499101524" watchObservedRunningTime="2025-10-13 06:49:01.818313609 +0000 UTC m=+149.505758801"
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.905933 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.906144 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.406107771 +0000 UTC m=+150.093552963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.906289 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:01 crc kubenswrapper[4664]: E1013 06:49:01.906815 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.406788269 +0000 UTC m=+150.094233451 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:01 crc kubenswrapper[4664]: I1013 06:49:01.943942 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" podStartSLOduration=128.943919781 podStartE2EDuration="2m8.943919781s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:01.94319568 +0000 UTC m=+149.630640892" watchObservedRunningTime="2025-10-13 06:49:01.943919781 +0000 UTC m=+149.631364983"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.007052 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.007242 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.507211755 +0000 UTC m=+150.194656947 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.007358 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.007732 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.507724529 +0000 UTC m=+150.195169721 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.027087 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.027173 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.042430 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" event={"ID":"55e9e787-3f8b-4a88-8693-7e0b265b4724","Type":"ContainerStarted","Data":"c0497166edb15c70c356dcff6503218c655e7ead9e01e82f6598be9514460e69"}
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.108298 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.108647 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.608618878 +0000 UTC m=+150.296064070 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.108933 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.108971 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.109093 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.109131 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.109339 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.114886 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.614877044 +0000 UTC m=+150.302322236 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.120913 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.131657 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.132603 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.144935 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.216225 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.216551 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.716537094 +0000 UTC m=+150.403982286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.317763 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.318151 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.818139253 +0000 UTC m=+150.505584445 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.359641 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.376220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.376256 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.423546 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.454171 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:02.954123806 +0000 UTC m=+150.641569028 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.505473 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:02 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:02 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:02 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.505553 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.526698 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.527139 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.027125213 +0000 UTC m=+150.714570405 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.628317 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.628601 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.128586238 +0000 UTC m=+150.816031420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.730350 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.730871 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.230846055 +0000 UTC m=+150.918291247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.832203 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.832422 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.332389172 +0000 UTC m=+151.019834364 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.832564 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.833001 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.332984939 +0000 UTC m=+151.020430131 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.898890 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d6645"]
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.900863 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d6645"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.919537 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.933316 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.933651 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.933695 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkcxt\" (UniqueName: \"kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645"
Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.933737 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645"
Oct 13 06:49:02 crc kubenswrapper[4664]: E1013 06:49:02.933843 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.433828877 +0000 UTC m=+151.121274069 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:02 crc kubenswrapper[4664]: I1013 06:49:02.956336 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d6645"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.034861 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkcxt\" (UniqueName: \"kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.034927 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.034973 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.035008 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.035503 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.036128 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.036442 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.536428463 +0000 UTC m=+151.223873655 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.046085 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.046177 4664 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-47mrj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.29:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.046237 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.29:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.046175 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.112751 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" event={"ID":"55e9e787-3f8b-4a88-8693-7e0b265b4724","Type":"ContainerStarted","Data":"8fab346e6ded3de1b86e3ccbc87478a710f30110fb5e385577be4c14a6b620c5"} Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.131547 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5htx8"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.133482 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.135631 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.135704 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-13 06:49:03.635687106 +0000 UTC m=+151.323132298 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.145570 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.155354 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.655335767 +0000 UTC m=+151.342780959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.176231 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkcxt\" (UniqueName: \"kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt\") pod \"community-operators-d6645\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.183680 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.185622 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5htx8"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.217572 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.255063 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.255330 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw2kr\" (UniqueName: \"kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.257515 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.757498852 +0000 UTC m=+151.444944044 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.257568 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.257602 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.278946 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zjm5p"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.279901 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.359669 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.359715 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.359764 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.359948 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.360028 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw2kr\" (UniqueName: \"kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.360130 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd7xn\" (UniqueName: \"kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.360227 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.360231 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.360609 4664 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.860592403 +0000 UTC m=+151.548037595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.361080 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.393453 4664 patch_prober.go:28] interesting pod/downloads-7954f5f757-dh2vl container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.393499 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dh2vl" podUID="d981b25e-c830-4f47-9851-d51db10ed5bf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.395648 4664 patch_prober.go:28] interesting pod/downloads-7954f5f757-dh2vl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.395673 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dh2vl" podUID="d981b25e-c830-4f47-9851-d51db10ed5bf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.464481 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.465008 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd7xn\" (UniqueName: \"kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.465076 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content\") 
pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.465097 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.465620 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.465962 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:03.965940727 +0000 UTC m=+151.653385909 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.466629 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.503550 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:03 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:03 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:03 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.504057 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.548109 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zjm5p"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.548562 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw2kr\" (UniqueName: \"kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr\") pod \"certified-operators-5htx8\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 
crc kubenswrapper[4664]: I1013 06:49:03.571009 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.571511 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.071493776 +0000 UTC m=+151.758938968 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.619303 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.629111 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd7xn\" (UniqueName: \"kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn\") pod \"community-operators-zjm5p\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.630983 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.658863 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.676572 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.677298 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.177256082 +0000 UTC m=+151.864701274 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.680970 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fvrq\" (UniqueName: \"kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.681039 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.681122 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.681143 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.681555 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.181542312 +0000 UTC m=+151.868987504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.734949 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.784488 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.785214 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fvrq\" (UniqueName: \"kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.785270 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.785348 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.786081 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.786163 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.286147784 +0000 UTC m=+151.973592976 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.787536 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.812943 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"] Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.912191 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.930938 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:03 crc kubenswrapper[4664]: E1013 06:49:03.931501 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.43148914 +0000 UTC m=+152.118934332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:03 crc kubenswrapper[4664]: I1013 06:49:03.942675 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fvrq\" (UniqueName: \"kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq\") pod \"certified-operators-xjqrl\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") " pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.035428 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.035728 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-13 06:49:04.535712533 +0000 UTC m=+152.223157725 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.050253 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.141282 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.141744 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.641726454 +0000 UTC m=+152.329171646 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.162263 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"375305a42ecf01186c05ff1971c60c2b298f9340cb595e928ad7ac94aafc6ee1"} Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.191579 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.192652 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.229418 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.229454 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.241926 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:04 crc 
kubenswrapper[4664]: I1013 06:49:04.243397 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k" Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.243715 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.743699674 +0000 UTC m=+152.431144856 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: W1013 06:49:04.337838 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-67e891ace51f8927725cb8d589f7c6fd39c1d7c1338ee27b8d31aa70d200e861 WatchSource:0}: Error finding container 67e891ace51f8927725cb8d589f7c6fd39c1d7c1338ee27b8d31aa70d200e861: Status 404 returned error can't find the container with id 67e891ace51f8927725cb8d589f7c6fd39c1d7c1338ee27b8d31aa70d200e861 Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.344230 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.346786 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.846770624 +0000 UTC m=+152.534215816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.391292 4664 patch_prober.go:28] interesting pod/apiserver-76f77b778f-s7fbf container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]log ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]etcd ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/start-apiserver-admission-initializer ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/generic-apiserver-start-informers ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/max-in-flight-filter ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/storage-object-count-tracker-hook ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/image.openshift.io-apiserver-caches ok Oct 13 06:49:04 crc kubenswrapper[4664]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Oct 13 06:49:04 crc kubenswrapper[4664]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/project.openshift.io-projectcache ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/openshift.io-startinformers ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/openshift.io-restmapperupdater ok Oct 13 06:49:04 crc kubenswrapper[4664]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Oct 13 06:49:04 crc kubenswrapper[4664]: livez check failed Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.391377 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf" podUID="fa3d6ad9-c819-4a98-9ded-9371e6259f9b" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.449585 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.449846 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.949819043 +0000 UTC m=+152.637264225 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.450956 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.451356 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:04.951343707 +0000 UTC m=+152.638788899 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.507069 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:04 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:04 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:04 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.507137 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.552479 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.553086 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.053063738 +0000 UTC m=+152.740508930 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:04 crc kubenswrapper[4664]: W1013 06:49:04.586768 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb874d683_d666_4c72_8a82_7bfb88b53abd.slice/crio-9597d917f2f70ac3a1f9138194e14f878788871af8ce1e1c0181f22cbdba20a9 WatchSource:0}: Error finding container 9597d917f2f70ac3a1f9138194e14f878788871af8ce1e1c0181f22cbdba20a9: Status 404 returned error can't find the container with id 9597d917f2f70ac3a1f9138194e14f878788871af8ce1e1c0181f22cbdba20a9
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.588838 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d6645"]
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.663522 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.664344 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.164328148 +0000 UTC m=+152.851773340 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.765132 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.765788 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.265748052 +0000 UTC m=+152.953193244 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.812954 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zjm5p"]
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.869934 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.870453 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.370438477 +0000 UTC m=+153.057883669 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:04 crc kubenswrapper[4664]: I1013 06:49:04.973456 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:04 crc kubenswrapper[4664]: E1013 06:49:04.974057 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.474019222 +0000 UTC m=+153.161464414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.108776 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.109478 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.60945971 +0000 UTC m=+153.296904902 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.147507 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.147585 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-ml2tj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.151214 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5htx8"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.162256 4664 patch_prober.go:28] interesting pod/console-f9d7485db-ml2tj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.34:8443/health\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.162326 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ml2tj" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" probeResult="failure" output="Get \"https://10.217.0.34:8443/health\": dial tcp 10.217.0.34:8443: connect: connection refused"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.181690 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"28f9b984eccb7f3384177c84cbf9f79fc4b0bf1c479c69deeaac0b95d3c9a4d7"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.181738 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"088e8b892a042484ccef2ead05c173819eb0a48a620b898437b70c450d5fd94d"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.186251 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" event={"ID":"55e9e787-3f8b-4a88-8693-7e0b265b4724","Type":"ContainerStarted","Data":"ea5e0f0742e3ce038d732c535acdf457829c285e3553aa7a94d68e6ca47af6c9"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.187281 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerStarted","Data":"e6801dfb9a9ef47b18b361b3b7c722d68f218d489fd7743619c14d34f6f38705"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.187960 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerStarted","Data":"99e13d0084a86784e1dbdaafc6dac4e8f66a7e877ce2c4d1f297c68509302d96"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.188754 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"fa90b58ebaa4ba84f0a8f4c755654e6c90371ec1c8748f0a1ec75ad1a6d70377"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.188772 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"67e891ace51f8927725cb8d589f7c6fd39c1d7c1338ee27b8d31aa70d200e861"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.189099 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.193998 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6fb4b47875bea3e0bf66f2fdd5d76f6557ad86a13ce2cf150b190e800a257a05"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.197354 4664 generic.go:334] "Generic (PLEG): container finished" podID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerID="30246692bf98161378afc2133edf374dd79e2976f16eb04aa7a8660a14f4a90f" exitCode=0
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.198456 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerDied","Data":"30246692bf98161378afc2133edf374dd79e2976f16eb04aa7a8660a14f4a90f"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.198473 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerStarted","Data":"9597d917f2f70ac3a1f9138194e14f878788871af8ce1e1c0181f22cbdba20a9"}
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.216135 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.218336 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.718302011 +0000 UTC m=+153.405747203 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.221670 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-97b4k"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.234159 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.256881 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.265405 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.276057 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.309662 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.318888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.318980 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.319014 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s42nk\" (UniqueName: \"kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.319056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.319451 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.819434987 +0000 UTC m=+153.506880169 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.326885 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.424454 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.424715 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.424767 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.424894 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s42nk\" (UniqueName: \"kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.425233 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:05.925211933 +0000 UTC m=+153.612657115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.425697 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.426896 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.482166 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.499773 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4bxj9"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.502136 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s42nk\" (UniqueName: \"kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk\") pod \"redhat-marketplace-6gqwj\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.508499 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:05 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:05 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:05 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.508554 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.526840 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.527968 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.027940133 +0000 UTC m=+153.715385325 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.628272 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.629141 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.129032838 +0000 UTC m=+153.816478030 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.629575 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" podStartSLOduration=13.629564552 podStartE2EDuration="13.629564552s" podCreationTimestamp="2025-10-13 06:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:05.50823324 +0000 UTC m=+153.195678422" watchObservedRunningTime="2025-10-13 06:49:05.629564552 +0000 UTC m=+153.317009744"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.664393 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gqwj"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.674302 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.675382 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.677982 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.733034 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.733512 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.733586 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.733628 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nt6s\" (UniqueName: \"kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.736568 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.236545092 +0000 UTC m=+153.923990284 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.737321 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.835560 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.835987 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.836079 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nt6s\" (UniqueName: \"kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.836116 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.836612 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.836709 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.33668582 +0000 UTC m=+154.024131012 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.836963 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.924946 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nt6s\" (UniqueName: \"kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s\") pod \"redhat-marketplace-z6s2t\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") " pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.930295 4664 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Oct 13 06:49:05 crc kubenswrapper[4664]: I1013 06:49:05.947977 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:05 crc kubenswrapper[4664]: E1013 06:49:05.948568 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.448551757 +0000 UTC m=+154.135996949 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.038137 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.050750 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.051542 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.551518353 +0000 UTC m=+154.238963545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.131385 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"]
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.133376 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.155780 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.156542 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.656517177 +0000 UTC m=+154.343962369 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.195919 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.203113 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"]
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.216187 4664 generic.go:334] "Generic (PLEG): container finished" podID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerID="011a1cc93e4b8354930e88dafdb970e13943a506cff93d406866b38c1be199de" exitCode=0
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.216246 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerDied","Data":"011a1cc93e4b8354930e88dafdb970e13943a506cff93d406866b38c1be199de"}
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.216272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerStarted","Data":"b128653fbffec35d4f16d474020481ef34e32c9aa036a0b0729efb5935d66c6a"}
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.223181 4664 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-10-13T06:49:05.930316696Z","Handler":null,"Name":""}
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.238078 4664 generic.go:334] "Generic (PLEG): container finished" podID="2767d696-0a63-452a-931a-2634decc57d1" containerID="c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529" exitCode=0
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.238163 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerDied","Data":"c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529"}
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.256306 4664 generic.go:334] "Generic (PLEG): container finished" podID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerID="e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a" exitCode=0
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.256555 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerDied","Data":"e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a"}
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.257519 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.257832 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.257879 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp5gk\" (UniqueName: \"kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.257896 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.257980 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.757933772 +0000 UTC m=+154.445378964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.258094 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.258426 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.758419295 +0000 UTC m=+154.445864487 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.362991 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.363496 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp5gk\" (UniqueName: \"kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.363565 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.363837 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.365337 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.865312093 +0000 UTC m=+154.552757285 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.366862 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.376205 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.465548 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:06 crc kubenswrapper[4664]: E1013 06:49:06.465874 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-13 06:49:06.965861801 +0000 UTC m=+154.653306993 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tfpbq" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.481838 4664 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.481886 4664 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.502056 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:06 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:06 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:06 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.502453 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.534584 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp5gk\" (UniqueName: \"kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk\") pod \"redhat-operators-cwwpm\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.556937 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.558864 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.569721 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.627450 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.627983 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.675224 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.675363 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2vrt\" (UniqueName: \"kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.675396 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.675487 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.771862 4664 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.772089 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.779623 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2vrt\" (UniqueName: \"kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.779770 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.779876 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.780384 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.780848 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.806205 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.875153 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2vrt\" (UniqueName: \"kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt\") pod \"redhat-operators-zzcvr\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") " pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:06 crc kubenswrapper[4664]: I1013 06:49:06.946478 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.024276 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"]
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.071782 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.187260 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.188111 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.204305 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.216743 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.222242 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.228777 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.308451 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.308555 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.312252 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.338491 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerStarted","Data":"c0403a6fded88996dc8077b8fd0f3cd46f6b4132473aa4fcf29d56be304f8d74"}
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.371737 4664 generic.go:334] "Generic (PLEG): container finished" podID="89073c83-c4b0-460e-8011-433081541325" containerID="894ab5deb891e5dfa91d2b388b8ac0e959f15d17893bf720e30f7646bb590257" exitCode=0
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.372751 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" event={"ID":"89073c83-c4b0-460e-8011-433081541325","Type":"ContainerDied","Data":"894ab5deb891e5dfa91d2b388b8ac0e959f15d17893bf720e30f7646bb590257"}
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.428219 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tfpbq\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.429487 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.429549 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.430520 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.493240 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.509663 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:07 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:07 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:07 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.509718 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.519944 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.525628 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 13 06:49:07 crc kubenswrapper[4664]: I1013 06:49:07.766941 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"]
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.033286 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:49:08 crc kubenswrapper[4664]: W1013 06:49:08.115032 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ce6d91a_62f4_4f5e_8551_9bcb526f0d5d.slice/crio-cd4d8f24a7cc0fcd9be4662c6dc8055a192cb5f9f48e4407058b8bdbf407356c WatchSource:0}: Error finding container cd4d8f24a7cc0fcd9be4662c6dc8055a192cb5f9f48e4407058b8bdbf407356c: Status 404 returned error can't find the container with id cd4d8f24a7cc0fcd9be4662c6dc8055a192cb5f9f48e4407058b8bdbf407356c
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.445721 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerStarted","Data":"e5e467b349faba6f7a21940a28d9c49dabb123ddc30cf05998c81bc4c1557389"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.445785 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerStarted","Data":"7101a953f91dd6f98b533d560e3844f610c1e74e99e5b16d946bd0c8c7f2c85a"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.471342 4664 generic.go:334] "Generic (PLEG): container finished" podID="e478ea82-bff7-4555-9004-1252b36a6a77" containerID="b59321754374f22b65fed80d8cef6ec8eede92ea3919d2084bb6f98b8b126564" exitCode=0
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.471458 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerDied","Data":"b59321754374f22b65fed80d8cef6ec8eede92ea3919d2084bb6f98b8b126564"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.485062 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerStarted","Data":"cd4d8f24a7cc0fcd9be4662c6dc8055a192cb5f9f48e4407058b8bdbf407356c"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.504009 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:08 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:08 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:08 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.504084 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.508289 4664 generic.go:334] "Generic (PLEG): container finished" podID="8980bd64-f06f-424e-91cb-41b402bfc136" containerID="1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78" exitCode=0
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.508418 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerDied","Data":"1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.508503 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerStarted","Data":"dfd94b6bce92702a3851f56ce5b235901fd92c6d33dd4f83d9abd7fc81a3b5cc"}
Oct 13 06:49:08 crc kubenswrapper[4664]: I1013 06:49:08.541947 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"]
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.014043 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.247237 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.257194 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-s7fbf"
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.372332 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.525466 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume\") pod \"89073c83-c4b0-460e-8011-433081541325\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") "
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.525638 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume\") pod \"89073c83-c4b0-460e-8011-433081541325\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") "
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.525681 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx675\" (UniqueName: \"kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675\") pod \"89073c83-c4b0-460e-8011-433081541325\" (UID: \"89073c83-c4b0-460e-8011-433081541325\") "
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.526624 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 13 06:49:09 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld
Oct 13 06:49:09 crc kubenswrapper[4664]: [+]process-running ok
Oct 13 06:49:09 crc kubenswrapper[4664]: healthz check failed
Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.526698 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure"
output="HTTP probe failed with statuscode: 500" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.528431 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume" (OuterVolumeSpecName: "config-volume") pod "89073c83-c4b0-460e-8011-433081541325" (UID: "89073c83-c4b0-460e-8011-433081541325"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.539524 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89073c83-c4b0-460e-8011-433081541325-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.544372 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675" (OuterVolumeSpecName: "kube-api-access-vx675") pod "89073c83-c4b0-460e-8011-433081541325" (UID: "89073c83-c4b0-460e-8011-433081541325"). InnerVolumeSpecName "kube-api-access-vx675". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.550181 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "89073c83-c4b0-460e-8011-433081541325" (UID: "89073c83-c4b0-460e-8011-433081541325"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.638252 4664 generic.go:334] "Generic (PLEG): container finished" podID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerID="e5e467b349faba6f7a21940a28d9c49dabb123ddc30cf05998c81bc4c1557389" exitCode=0 Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.638424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerDied","Data":"e5e467b349faba6f7a21940a28d9c49dabb123ddc30cf05998c81bc4c1557389"} Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.640048 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx675\" (UniqueName: \"kubernetes.io/projected/89073c83-c4b0-460e-8011-433081541325-kube-api-access-vx675\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.640074 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89073c83-c4b0-460e-8011-433081541325-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.761727 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad","Type":"ContainerStarted","Data":"cbab2b8164efe76c5d579758512921fc66507d512fca090a6095628b1123522f"} Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.825222 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.827283 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49" event={"ID":"89073c83-c4b0-460e-8011-433081541325","Type":"ContainerDied","Data":"d69ac2381effc8e3a609def8a79eb8afde0e0720f28121e63883fd33af35d7b8"} Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.827362 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d69ac2381effc8e3a609def8a79eb8afde0e0720f28121e63883fd33af35d7b8" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.851243 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" event={"ID":"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6","Type":"ContainerStarted","Data":"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f"} Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.851295 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" event={"ID":"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6","Type":"ContainerStarted","Data":"0f177798c2cbb74b6479796f1bec2c0b77c41193fb03e86591d54be05f8167dc"} Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.930942 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" podStartSLOduration=136.930894717 podStartE2EDuration="2m16.930894717s" podCreationTimestamp="2025-10-13 06:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:09.92707732 +0000 UTC m=+157.614522522" watchObservedRunningTime="2025-10-13 06:49:09.930894717 +0000 UTC m=+157.618339919" Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.985063 4664 generic.go:334] "Generic (PLEG): container finished" podID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerID="000b03179a1bb2a998efe577d3f6f8b54067897ef5d9823b63141076ef035c40" exitCode=0 Oct 13 06:49:09 crc kubenswrapper[4664]: I1013 06:49:09.986642 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerDied","Data":"000b03179a1bb2a998efe577d3f6f8b54067897ef5d9823b63141076ef035c40"} Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.298375 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 13 06:49:10 crc kubenswrapper[4664]: E1013 06:49:10.299164 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89073c83-c4b0-460e-8011-433081541325" containerName="collect-profiles" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.299180 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89073c83-c4b0-460e-8011-433081541325" containerName="collect-profiles" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.299306 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="89073c83-c4b0-460e-8011-433081541325" containerName="collect-profiles" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.302176 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.306421 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.312071 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.333617 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.410050 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-lfw8h" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.467319 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.467383 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.517089 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:10 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:10 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:10 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.517227 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.570139 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.570272 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.570565 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " 
pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.596127 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:10 crc kubenswrapper[4664]: I1013 06:49:10.648313 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.032943 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad","Type":"ContainerStarted","Data":"1dbda57e6de67496391a34dfec0d56503397e5dfd897917d0e3ae0ca22715d50"} Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.033373 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.052533 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=4.052517237 podStartE2EDuration="4.052517237s" podCreationTimestamp="2025-10-13 06:49:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:11.049260236 +0000 UTC m=+158.736705428" watchObservedRunningTime="2025-10-13 06:49:11.052517237 +0000 UTC m=+158.739962419" Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.498702 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:11 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:11 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:11 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.498832 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:11 crc kubenswrapper[4664]: I1013 06:49:11.523404 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 13 06:49:11 crc kubenswrapper[4664]: W1013 06:49:11.588527 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pode493fe25_4de0_4078_9994_4732e9c9ad80.slice/crio-e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735 WatchSource:0}: Error finding container e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735: Status 404 returned error can't find the container with id e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735 Oct 13 06:49:12 crc kubenswrapper[4664]: I1013 06:49:12.123448 4664 generic.go:334] "Generic (PLEG): container finished" podID="efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" containerID="1dbda57e6de67496391a34dfec0d56503397e5dfd897917d0e3ae0ca22715d50" exitCode=0 Oct 13 06:49:12 crc kubenswrapper[4664]: I1013 
06:49:12.123580 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad","Type":"ContainerDied","Data":"1dbda57e6de67496391a34dfec0d56503397e5dfd897917d0e3ae0ca22715d50"} Oct 13 06:49:12 crc kubenswrapper[4664]: I1013 06:49:12.156959 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e493fe25-4de0-4078-9994-4732e9c9ad80","Type":"ContainerStarted","Data":"e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735"} Oct 13 06:49:12 crc kubenswrapper[4664]: I1013 06:49:12.499319 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:12 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:12 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:12 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:12 crc kubenswrapper[4664]: I1013 06:49:12.499392 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.217768 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e493fe25-4de0-4078-9994-4732e9c9ad80","Type":"ContainerStarted","Data":"18f753d30bf2cce1d31d0a186e9808d241c0fc76cd0753d67a6d0119d745039d"} Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.398845 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-dh2vl" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.429997 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.429951928 podStartE2EDuration="3.429951928s" podCreationTimestamp="2025-10-13 06:49:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:13.251125443 +0000 UTC m=+160.938570815" watchObservedRunningTime="2025-10-13 06:49:13.429951928 +0000 UTC m=+161.117397110" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.498182 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:13 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:13 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:13 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.498292 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.799872 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.941917 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access\") pod \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.941976 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir\") pod \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\" (UID: \"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad\") " Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.942382 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" (UID: "efe10b4e-7e51-429a-8b11-0d4a4e14c6ad"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:49:13 crc kubenswrapper[4664]: I1013 06:49:13.970728 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" (UID: "efe10b4e-7e51-429a-8b11-0d4a4e14c6ad"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.043835 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.043869 4664 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efe10b4e-7e51-429a-8b11-0d4a4e14c6ad-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.281855 4664 generic.go:334] "Generic (PLEG): container finished" podID="e493fe25-4de0-4078-9994-4732e9c9ad80" containerID="18f753d30bf2cce1d31d0a186e9808d241c0fc76cd0753d67a6d0119d745039d" exitCode=0 Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.281940 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e493fe25-4de0-4078-9994-4732e9c9ad80","Type":"ContainerDied","Data":"18f753d30bf2cce1d31d0a186e9808d241c0fc76cd0753d67a6d0119d745039d"} Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.303490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"efe10b4e-7e51-429a-8b11-0d4a4e14c6ad","Type":"ContainerDied","Data":"cbab2b8164efe76c5d579758512921fc66507d512fca090a6095628b1123522f"} Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.303552 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbab2b8164efe76c5d579758512921fc66507d512fca090a6095628b1123522f" Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.303628 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 13 06:49:14 crc kubenswrapper[4664]: E1013 06:49:14.378403 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-podefe10b4e_7e51_429a_8b11_0d4a4e14c6ad.slice/crio-cbab2b8164efe76c5d579758512921fc66507d512fca090a6095628b1123522f\": RecentStats: unable to find data in memory cache]" Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.498983 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:14 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:14 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:14 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:14 crc kubenswrapper[4664]: I1013 06:49:14.499057 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.147386 4664 patch_prober.go:28] interesting pod/console-f9d7485db-ml2tj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.34:8443/health\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.148141 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ml2tj" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" probeResult="failure" output="Get \"https://10.217.0.34:8443/health\": dial tcp 10.217.0.34:8443: connect: connection refused" Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.497902 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:15 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:15 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:15 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.497993 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.869296 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.980248 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir\") pod \"e493fe25-4de0-4078-9994-4732e9c9ad80\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.980408 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access\") pod \"e493fe25-4de0-4078-9994-4732e9c9ad80\" (UID: \"e493fe25-4de0-4078-9994-4732e9c9ad80\") " Oct 13 06:49:15 crc kubenswrapper[4664]: I1013 06:49:15.982210 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e493fe25-4de0-4078-9994-4732e9c9ad80" (UID: "e493fe25-4de0-4078-9994-4732e9c9ad80"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.000664 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e493fe25-4de0-4078-9994-4732e9c9ad80" (UID: "e493fe25-4de0-4078-9994-4732e9c9ad80"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.082468 4664 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e493fe25-4de0-4078-9994-4732e9c9ad80-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.083072 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e493fe25-4de0-4078-9994-4732e9c9ad80-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.431965 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e493fe25-4de0-4078-9994-4732e9c9ad80","Type":"ContainerDied","Data":"e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735"} Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.432014 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9ece59bee0568c609d547331aba04daeaf71a8c9e341c8bc8f8b8cb5bac8735" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.432162 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.488940 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.494373 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eba49cc7-48bb-4372-8eb3-c88513c591b9-metrics-certs\") pod \"network-metrics-daemon-9mgbt\" (UID: \"eba49cc7-48bb-4372-8eb3-c88513c591b9\") " pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.508048 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:16 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:16 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:16 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.508141 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:16 crc kubenswrapper[4664]: I1013 06:49:16.567520 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9mgbt" Oct 13 06:49:17 crc kubenswrapper[4664]: I1013 06:49:17.160925 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9mgbt"] Oct 13 06:49:17 crc kubenswrapper[4664]: I1013 06:49:17.451496 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" event={"ID":"eba49cc7-48bb-4372-8eb3-c88513c591b9","Type":"ContainerStarted","Data":"32047df8ea091da18dd650b0170603f821a9f94d0b4f01bfe1f0151a0b2b9a74"} Oct 13 06:49:17 crc kubenswrapper[4664]: I1013 06:49:17.497741 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:17 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:17 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:17 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:17 crc kubenswrapper[4664]: I1013 06:49:17.497915 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:18 crc kubenswrapper[4664]: I1013 06:49:18.481957 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" event={"ID":"eba49cc7-48bb-4372-8eb3-c88513c591b9","Type":"ContainerStarted","Data":"c98b404a12b4667e1a2578bba6a53b268b85ff31bd7f96e4d9fee5f4355e4786"} Oct 13 06:49:18 crc kubenswrapper[4664]: I1013 06:49:18.500383 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:18 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:18 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:18 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:18 crc kubenswrapper[4664]: I1013 06:49:18.500447 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:19 crc kubenswrapper[4664]: I1013 06:49:19.497679 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 13 06:49:19 crc kubenswrapper[4664]: [-]has-synced failed: reason withheld Oct 13 06:49:19 crc kubenswrapper[4664]: [+]process-running ok Oct 13 06:49:19 crc kubenswrapper[4664]: healthz check failed Oct 13 06:49:19 crc kubenswrapper[4664]: I1013 06:49:19.498039 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 06:49:20 crc kubenswrapper[4664]: I1013 06:49:20.498646 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:49:20 crc kubenswrapper[4664]: I1013 06:49:20.504367 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 06:49:25 crc kubenswrapper[4664]: I1013 06:49:25.153145 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:49:25 crc kubenswrapper[4664]: I1013 06:49:25.156925 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:49:26 crc kubenswrapper[4664]: I1013 06:49:26.643536 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9mgbt" event={"ID":"eba49cc7-48bb-4372-8eb3-c88513c591b9","Type":"ContainerStarted","Data":"de7bf18d4a467019fbb64155c9b24423ac0df2589e3100f91f17b217a0a45214"} Oct 13 06:49:26 crc kubenswrapper[4664]: I1013 06:49:26.667949 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-9mgbt" podStartSLOduration=154.667925939 podStartE2EDuration="2m34.667925939s" podCreationTimestamp="2025-10-13 06:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:49:26.663876786 +0000 UTC m=+174.351321988" watchObservedRunningTime="2025-10-13 06:49:26.667925939 +0000 UTC m=+174.355371131" Oct 13 06:49:27 crc kubenswrapper[4664]: I1013 06:49:27.500049 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:49:28 crc kubenswrapper[4664]: I1013 06:49:28.811669 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:49:28 crc kubenswrapper[4664]: I1013 06:49:28.811761 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:49:35 crc kubenswrapper[4664]: I1013 06:49:35.364276 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.310523 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.311233 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t2vrt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-zzcvr_openshift-marketplace(8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.312403 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-zzcvr" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.341911 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.342317 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sd7xn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-zjm5p_openshift-marketplace(83ca1b4e-cbbb-43eb-9d12-853db933995f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.343638 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-zjm5p" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.359291 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.359468 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6nt6s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-z6s2t_openshift-marketplace(8980bd64-f06f-424e-91cb-41b402bfc136): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.360822 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-z6s2t" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.398867 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.402404 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wp5gk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-cwwpm_openshift-marketplace(9283a98e-b18c-45bf-9f1c-3d2dac8b372e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.405921 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-cwwpm" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" Oct 13 06:49:39 crc kubenswrapper[4664]: I1013 06:49:39.734997 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerStarted","Data":"036f5897382688c55519b614452bc9a3edfcfade0dc7a436d2097455783ee3af"} Oct 13 06:49:39 crc kubenswrapper[4664]: I1013 06:49:39.736920 4664 generic.go:334] "Generic (PLEG): container finished" podID="e478ea82-bff7-4555-9004-1252b36a6a77" containerID="51fd7d3c3ce81e6ea55418e95eb8617717ee339fc71c7cba8260090894536683" exitCode=0 Oct 13 06:49:39 crc kubenswrapper[4664]: I1013 06:49:39.736972 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerDied","Data":"51fd7d3c3ce81e6ea55418e95eb8617717ee339fc71c7cba8260090894536683"} Oct 13 06:49:39 crc kubenswrapper[4664]: I1013 06:49:39.739994 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerStarted","Data":"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9"} Oct 13 06:49:39 crc kubenswrapper[4664]: I1013 06:49:39.743895 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerStarted","Data":"ed604d1f8d88e1832c24808c301496cdcae6c132fcb3d58c974aa18d5635b7d3"} Oct 13 06:49:39 crc 
kubenswrapper[4664]: E1013 06:49:39.746334 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-cwwpm" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.746654 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-zzcvr" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.747080 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zjm5p" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" Oct 13 06:49:39 crc kubenswrapper[4664]: E1013 06:49:39.749438 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-z6s2t" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.765089 4664 generic.go:334] "Generic (PLEG): container finished" podID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerID="ed604d1f8d88e1832c24808c301496cdcae6c132fcb3d58c974aa18d5635b7d3" exitCode=0 Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.765240 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerDied","Data":"ed604d1f8d88e1832c24808c301496cdcae6c132fcb3d58c974aa18d5635b7d3"} Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.774118 4664 generic.go:334] "Generic (PLEG): container finished" podID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerID="036f5897382688c55519b614452bc9a3edfcfade0dc7a436d2097455783ee3af" exitCode=0 Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.774231 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerDied","Data":"036f5897382688c55519b614452bc9a3edfcfade0dc7a436d2097455783ee3af"} Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.785160 4664 generic.go:334] "Generic (PLEG): container finished" podID="2767d696-0a63-452a-931a-2634decc57d1" containerID="5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9" exitCode=0 Oct 13 06:49:40 crc kubenswrapper[4664]: I1013 06:49:40.785375 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerDied","Data":"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9"} Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.795174 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" 
event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerStarted","Data":"283e8af63a7c3cf26dc264cf564226b73ed4e6371edfb71bb0a618f6fdfce5ed"} Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.798313 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerStarted","Data":"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5"} Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.801632 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerStarted","Data":"49d5ed9eac252c054bc034083cb6c0929bc5c42a3808f466452d101f66a0ea73"} Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.804702 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerStarted","Data":"8f10e627404a78f985ed34c424c6c8c6bb0109a3864d012f909714c526e1885e"} Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.833165 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6gqwj" podStartSLOduration=3.878339065 podStartE2EDuration="36.833140308s" podCreationTimestamp="2025-10-13 06:49:05 +0000 UTC" firstStartedPulling="2025-10-13 06:49:08.474038849 +0000 UTC m=+156.161484041" lastFinishedPulling="2025-10-13 06:49:41.428840052 +0000 UTC m=+189.116285284" observedRunningTime="2025-10-13 06:49:41.815915166 +0000 UTC m=+189.503360358" watchObservedRunningTime="2025-10-13 06:49:41.833140308 +0000 UTC m=+189.520585510" Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.858047 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xjqrl" podStartSLOduration=4.788720013 podStartE2EDuration="38.858019446s" podCreationTimestamp="2025-10-13 06:49:03 +0000 UTC" firstStartedPulling="2025-10-13 06:49:07.42733284 +0000 UTC m=+155.114778032" lastFinishedPulling="2025-10-13 06:49:41.496632233 +0000 UTC m=+189.184077465" observedRunningTime="2025-10-13 06:49:41.837358967 +0000 UTC m=+189.524804179" watchObservedRunningTime="2025-10-13 06:49:41.858019446 +0000 UTC m=+189.545464638" Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.888479 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5htx8" podStartSLOduration=3.625453167 podStartE2EDuration="38.88845858s" podCreationTimestamp="2025-10-13 06:49:03 +0000 UTC" firstStartedPulling="2025-10-13 06:49:06.241331396 +0000 UTC m=+153.928776588" lastFinishedPulling="2025-10-13 06:49:41.504336759 +0000 UTC m=+189.191782001" observedRunningTime="2025-10-13 06:49:41.887883233 +0000 UTC m=+189.575328445" watchObservedRunningTime="2025-10-13 06:49:41.88845858 +0000 UTC m=+189.575903772" Oct 13 06:49:41 crc kubenswrapper[4664]: I1013 06:49:41.889019 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d6645" podStartSLOduration=3.552022478 podStartE2EDuration="39.889012055s" podCreationTimestamp="2025-10-13 06:49:02 +0000 UTC" firstStartedPulling="2025-10-13 06:49:05.233706853 +0000 UTC m=+152.921152045" lastFinishedPulling="2025-10-13 06:49:41.57069638 +0000 UTC m=+189.258141622" observedRunningTime="2025-10-13 06:49:41.86424445 +0000 UTC 
m=+189.551689652" watchObservedRunningTime="2025-10-13 06:49:41.889012055 +0000 UTC m=+189.576457247" Oct 13 06:49:42 crc kubenswrapper[4664]: I1013 06:49:42.366287 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 13 06:49:43 crc kubenswrapper[4664]: I1013 06:49:43.223248 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:43 crc kubenswrapper[4664]: I1013 06:49:43.223377 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:43 crc kubenswrapper[4664]: I1013 06:49:43.665708 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:43 crc kubenswrapper[4664]: I1013 06:49:43.666256 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:44 crc kubenswrapper[4664]: I1013 06:49:44.052618 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:44 crc kubenswrapper[4664]: I1013 06:49:44.052706 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:44 crc kubenswrapper[4664]: I1013 06:49:44.102669 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:44 crc kubenswrapper[4664]: I1013 06:49:44.410616 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-d6645" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="registry-server" probeResult="failure" output=< Oct 13 06:49:44 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 06:49:44 crc kubenswrapper[4664]: > Oct 13 06:49:44 crc kubenswrapper[4664]: I1013 06:49:44.707909 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-5htx8" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="registry-server" probeResult="failure" output=< Oct 13 06:49:44 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 06:49:44 crc kubenswrapper[4664]: > Oct 13 06:49:45 crc kubenswrapper[4664]: I1013 06:49:45.665826 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:49:45 crc kubenswrapper[4664]: I1013 06:49:45.665895 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:49:45 crc kubenswrapper[4664]: I1013 06:49:45.713045 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:49:51 crc kubenswrapper[4664]: I1013 06:49:51.885871 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerStarted","Data":"06048aff85826171e7ae9179060f0097ed1c6b7a7e63b96b6ecba4cdc7fb3313"} Oct 13 06:49:52 crc kubenswrapper[4664]: I1013 06:49:52.896414 4664 generic.go:334] "Generic (PLEG): container finished" podID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" 
containerID="06048aff85826171e7ae9179060f0097ed1c6b7a7e63b96b6ecba4cdc7fb3313" exitCode=0 Oct 13 06:49:52 crc kubenswrapper[4664]: I1013 06:49:52.896486 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerDied","Data":"06048aff85826171e7ae9179060f0097ed1c6b7a7e63b96b6ecba4cdc7fb3313"} Oct 13 06:49:52 crc kubenswrapper[4664]: I1013 06:49:52.904336 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerStarted","Data":"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54"} Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.264516 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.319864 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.719620 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.762308 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.910025 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerStarted","Data":"675bbbd47d2d3ed138471c13fc8b1a5ef0f0d5f18fae58d8070fd2e2b4a92638"} Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.918694 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerStarted","Data":"cc0ad7148e94f61e46d1f12e22b888c56ee95190294eb7ba05e63b0f6b52c226"} Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.921524 4664 generic.go:334] "Generic (PLEG): container finished" podID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerID="c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54" exitCode=0 Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.921687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerDied","Data":"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54"} Oct 13 06:49:53 crc kubenswrapper[4664]: I1013 06:49:53.975844 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zzcvr" podStartSLOduration=4.624289236 podStartE2EDuration="47.975821287s" podCreationTimestamp="2025-10-13 06:49:06 +0000 UTC" firstStartedPulling="2025-10-13 06:49:09.996757024 +0000 UTC m=+157.684202216" lastFinishedPulling="2025-10-13 06:49:53.348289075 +0000 UTC m=+201.035734267" observedRunningTime="2025-10-13 06:49:53.95433871 +0000 UTC m=+201.641783912" watchObservedRunningTime="2025-10-13 06:49:53.975821287 +0000 UTC m=+201.663266479" Oct 13 06:49:54 crc kubenswrapper[4664]: I1013 06:49:54.131327 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:54 crc 
kubenswrapper[4664]: I1013 06:49:54.928757 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerStarted","Data":"60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7"} Oct 13 06:49:54 crc kubenswrapper[4664]: I1013 06:49:54.942262 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerStarted","Data":"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"} Oct 13 06:49:54 crc kubenswrapper[4664]: I1013 06:49:54.945634 4664 generic.go:334] "Generic (PLEG): container finished" podID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerID="675bbbd47d2d3ed138471c13fc8b1a5ef0f0d5f18fae58d8070fd2e2b4a92638" exitCode=0 Oct 13 06:49:54 crc kubenswrapper[4664]: I1013 06:49:54.946117 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerDied","Data":"675bbbd47d2d3ed138471c13fc8b1a5ef0f0d5f18fae58d8070fd2e2b4a92638"} Oct 13 06:49:54 crc kubenswrapper[4664]: I1013 06:49:54.982986 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zjm5p" podStartSLOduration=3.911108318 podStartE2EDuration="51.982966351s" podCreationTimestamp="2025-10-13 06:49:03 +0000 UTC" firstStartedPulling="2025-10-13 06:49:06.267224061 +0000 UTC m=+153.954669253" lastFinishedPulling="2025-10-13 06:49:54.339082094 +0000 UTC m=+202.026527286" observedRunningTime="2025-10-13 06:49:54.95747952 +0000 UTC m=+202.644924732" watchObservedRunningTime="2025-10-13 06:49:54.982966351 +0000 UTC m=+202.670411543" Oct 13 06:49:55 crc kubenswrapper[4664]: I1013 06:49:55.717423 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:49:55 crc kubenswrapper[4664]: I1013 06:49:55.954317 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerStarted","Data":"324432bac4b4d13e00f35bf94e3d32028183d8be9dde8bcc48543f919d11273a"} Oct 13 06:49:55 crc kubenswrapper[4664]: I1013 06:49:55.956252 4664 generic.go:334] "Generic (PLEG): container finished" podID="8980bd64-f06f-424e-91cb-41b402bfc136" containerID="428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7" exitCode=0 Oct 13 06:49:55 crc kubenswrapper[4664]: I1013 06:49:55.956269 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerDied","Data":"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"} Oct 13 06:49:55 crc kubenswrapper[4664]: I1013 06:49:55.982265 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cwwpm" podStartSLOduration=4.093654454 podStartE2EDuration="49.98224352s" podCreationTimestamp="2025-10-13 06:49:06 +0000 UTC" firstStartedPulling="2025-10-13 06:49:09.659955781 +0000 UTC m=+157.347400973" lastFinishedPulling="2025-10-13 06:49:55.548544847 +0000 UTC m=+203.235990039" observedRunningTime="2025-10-13 06:49:55.978890833 +0000 UTC m=+203.666336025" watchObservedRunningTime="2025-10-13 06:49:55.98224352 +0000 UTC m=+203.669688712" Oct 13 
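[Editor's note: each "Observed pod startup duration" entry carries the raw timestamps behind its two figures. For the redhat-operators-cwwpm line above the arithmetic checks out as: podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp, and podStartSLOduration = E2E minus the image-pull window (lastFinishedPulling - firstStartedPulling). A sketch that recomputes both; the layout string is an assumption matching the printed format.]

```go
// sloduration.go - recompute podStartE2EDuration and podStartSLOduration
// from the redhat-operators-cwwpm timestamps logged above.
package main

import (
	"fmt"
	"time"
)

// Assumed layout matching "2025-10-13 06:49:06 +0000 UTC"; the fractional
// seconds are optional thanks to the .999999999 placeholder.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-10-13 06:49:06 +0000 UTC")
	firstPull := mustParse("2025-10-13 06:49:09.659955781 +0000 UTC")
	lastPull := mustParse("2025-10-13 06:49:55.548544847 +0000 UTC")
	running := mustParse("2025-10-13 06:49:55.98224352 +0000 UTC")

	e2e := running.Sub(created)          // 49.98224352s, as logged
	slo := e2e - lastPull.Sub(firstPull) // pull window excluded: 4.093654454s

	fmt.Printf("podStartE2EDuration=%v podStartSLOduration=%v\n", e2e, slo)
}
```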
Oct 13 06:49:56 crc kubenswrapper[4664]: I1013 06:49:56.808158 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cwwpm"
Oct 13 06:49:56 crc kubenswrapper[4664]: I1013 06:49:56.947658 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:56 crc kubenswrapper[4664]: I1013 06:49:56.947730 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:49:56 crc kubenswrapper[4664]: I1013 06:49:56.993886 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerStarted","Data":"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"}
Oct 13 06:49:57 crc kubenswrapper[4664]: I1013 06:49:57.018717 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z6s2t" podStartSLOduration=4.110572605 podStartE2EDuration="52.018699603s" podCreationTimestamp="2025-10-13 06:49:05 +0000 UTC" firstStartedPulling="2025-10-13 06:49:08.517075595 +0000 UTC m=+156.204520787" lastFinishedPulling="2025-10-13 06:49:56.425202593 +0000 UTC m=+204.112647785" observedRunningTime="2025-10-13 06:49:57.016240269 +0000 UTC m=+204.703685471" watchObservedRunningTime="2025-10-13 06:49:57.018699603 +0000 UTC m=+204.706144795"
Oct 13 06:49:57 crc kubenswrapper[4664]: I1013 06:49:57.084636 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"]
Oct 13 06:49:57 crc kubenswrapper[4664]: I1013 06:49:57.084947 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xjqrl" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="registry-server" containerID="cri-o://49d5ed9eac252c054bc034083cb6c0929bc5c42a3808f466452d101f66a0ea73" gracePeriod=2
Oct 13 06:49:57 crc kubenswrapper[4664]: I1013 06:49:57.861621 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cwwpm" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="registry-server" probeResult="failure" output=<
Oct 13 06:49:57 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 06:49:57 crc kubenswrapper[4664]: >
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.003233 4664 generic.go:334] "Generic (PLEG): container finished" podID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerID="49d5ed9eac252c054bc034083cb6c0929bc5c42a3808f466452d101f66a0ea73" exitCode=0
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.003300 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerDied","Data":"49d5ed9eac252c054bc034083cb6c0929bc5c42a3808f466452d101f66a0ea73"}
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.003346 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xjqrl" event={"ID":"0a5c1831-9145-4fd7-943b-c49bd95edf2d","Type":"ContainerDied","Data":"b128653fbffec35d4f16d474020481ef34e32c9aa036a0b0729efb5935d66c6a"}
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.003364 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b128653fbffec35d4f16d474020481ef34e32c9aa036a0b0729efb5935d66c6a"
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.019310 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xjqrl"
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.045417 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zzcvr" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="registry-server" probeResult="failure" output=<
Oct 13 06:49:58 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 06:49:58 crc kubenswrapper[4664]: >
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.107682 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content\") pod \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") "
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.107740 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fvrq\" (UniqueName: \"kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq\") pod \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") "
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.107787 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities\") pod \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\" (UID: \"0a5c1831-9145-4fd7-943b-c49bd95edf2d\") "
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.109083 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities" (OuterVolumeSpecName: "utilities") pod "0a5c1831-9145-4fd7-943b-c49bd95edf2d" (UID: "0a5c1831-9145-4fd7-943b-c49bd95edf2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.118032 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq" (OuterVolumeSpecName: "kube-api-access-7fvrq") pod "0a5c1831-9145-4fd7-943b-c49bd95edf2d" (UID: "0a5c1831-9145-4fd7-943b-c49bd95edf2d"). InnerVolumeSpecName "kube-api-access-7fvrq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.157547 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a5c1831-9145-4fd7-943b-c49bd95edf2d" (UID: "0a5c1831-9145-4fd7-943b-c49bd95edf2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.209463 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.209500 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a5c1831-9145-4fd7-943b-c49bd95edf2d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.209515 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fvrq\" (UniqueName: \"kubernetes.io/projected/0a5c1831-9145-4fd7-943b-c49bd95edf2d-kube-api-access-7fvrq\") on node \"crc\" DevicePath \"\"" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.812481 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.813090 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.813564 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.814311 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 06:49:58 crc kubenswrapper[4664]: I1013 06:49:58.814553 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab" gracePeriod=600 Oct 13 06:49:59 crc kubenswrapper[4664]: I1013 06:49:59.027068 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab" exitCode=0 Oct 13 06:49:59 crc kubenswrapper[4664]: I1013 06:49:59.027269 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xjqrl" Oct 13 06:49:59 crc kubenswrapper[4664]: I1013 06:49:59.027267 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab"} Oct 13 06:49:59 crc kubenswrapper[4664]: I1013 06:49:59.072874 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"] Oct 13 06:49:59 crc kubenswrapper[4664]: I1013 06:49:59.076194 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xjqrl"] Oct 13 06:50:00 crc kubenswrapper[4664]: I1013 06:50:00.035745 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2"} Oct 13 06:50:01 crc kubenswrapper[4664]: I1013 06:50:01.056537 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" path="/var/lib/kubelet/pods/0a5c1831-9145-4fd7-943b-c49bd95edf2d/volumes" Oct 13 06:50:03 crc kubenswrapper[4664]: I1013 06:50:03.736007 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:03 crc kubenswrapper[4664]: I1013 06:50:03.737009 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:03 crc kubenswrapper[4664]: I1013 06:50:03.780442 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:04 crc kubenswrapper[4664]: I1013 06:50:04.125443 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.039863 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z6s2t" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.040313 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z6s2t" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.091360 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z6s2t" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.142300 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z6s2t" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.857718 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cwwpm" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.899563 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cwwpm" Oct 13 06:50:06 crc kubenswrapper[4664]: I1013 06:50:06.993656 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zzcvr" Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.041552 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
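[Editor's note: the machine-config-daemon restart above is the standard liveness flow: the prober gets connection-refused on http://127.0.0.1:8798/health, marks the container unhealthy, and kills it with the configured grace period so it can be restarted. A sketch of a probe definition that would produce exactly this check; host, path, and port come from the log, while the timing fields are illustrative assumptions, not read from the pod spec.]

```go
// liveness.go - a corev1.Probe matching the HTTP liveness check seen above:
// GET http://127.0.0.1:8798/health.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1", // values taken from the probe output above
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		InitialDelaySeconds: 120, // assumption, not from the log
		PeriodSeconds:       30,  // assumption, not from the log
		FailureThreshold:    3,   // assumption; container restarts after repeated misses
	}
	fmt.Printf("%+v\n", probe)
}
```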
status="ready" pod="openshift-marketplace/redhat-operators-zzcvr" Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.487335 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zjm5p"] Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.488082 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zjm5p" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="registry-server" containerID="cri-o://60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7" gracePeriod=2 Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.839709 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.952848 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content\") pod \"83ca1b4e-cbbb-43eb-9d12-853db933995f\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.952923 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd7xn\" (UniqueName: \"kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn\") pod \"83ca1b4e-cbbb-43eb-9d12-853db933995f\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.952978 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities\") pod \"83ca1b4e-cbbb-43eb-9d12-853db933995f\" (UID: \"83ca1b4e-cbbb-43eb-9d12-853db933995f\") " Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.954259 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities" (OuterVolumeSpecName: "utilities") pod "83ca1b4e-cbbb-43eb-9d12-853db933995f" (UID: "83ca1b4e-cbbb-43eb-9d12-853db933995f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:07 crc kubenswrapper[4664]: I1013 06:50:07.962017 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn" (OuterVolumeSpecName: "kube-api-access-sd7xn") pod "83ca1b4e-cbbb-43eb-9d12-853db933995f" (UID: "83ca1b4e-cbbb-43eb-9d12-853db933995f"). InnerVolumeSpecName "kube-api-access-sd7xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.006637 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83ca1b4e-cbbb-43eb-9d12-853db933995f" (UID: "83ca1b4e-cbbb-43eb-9d12-853db933995f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.054472 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.054506 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd7xn\" (UniqueName: \"kubernetes.io/projected/83ca1b4e-cbbb-43eb-9d12-853db933995f-kube-api-access-sd7xn\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.054520 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83ca1b4e-cbbb-43eb-9d12-853db933995f-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.110379 4664 generic.go:334] "Generic (PLEG): container finished" podID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerID="60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7" exitCode=0 Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.110425 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerDied","Data":"60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7"} Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.110442 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zjm5p" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.110456 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zjm5p" event={"ID":"83ca1b4e-cbbb-43eb-9d12-853db933995f","Type":"ContainerDied","Data":"99e13d0084a86784e1dbdaafc6dac4e8f66a7e877ce2c4d1f297c68509302d96"} Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.110476 4664 scope.go:117] "RemoveContainer" containerID="60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.138639 4664 scope.go:117] "RemoveContainer" containerID="c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.143906 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zjm5p"] Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.145817 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zjm5p"] Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.171100 4664 scope.go:117] "RemoveContainer" containerID="e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.186513 4664 scope.go:117] "RemoveContainer" containerID="60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7" Oct 13 06:50:08 crc kubenswrapper[4664]: E1013 06:50:08.187098 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7\": container with ID starting with 60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7 not found: ID does not exist" containerID="60d4cf85c6b0acfd6f4c32b162dcc44e287367bd410e1140564cfa6d5a3319d7" Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.187141 
Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.187171 4664 scope.go:117] "RemoveContainer" containerID="c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54"
Oct 13 06:50:08 crc kubenswrapper[4664]: E1013 06:50:08.187658 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54\": container with ID starting with c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54 not found: ID does not exist" containerID="c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54"
Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.187723 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54"} err="failed to get container status \"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54\": rpc error: code = NotFound desc = could not find container \"c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54\": container with ID starting with c777323cd0523a92761a1171be4394374bab12eada52093b7f39d75858b1bd54 not found: ID does not exist"
Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.187756 4664 scope.go:117] "RemoveContainer" containerID="e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a"
Oct 13 06:50:08 crc kubenswrapper[4664]: E1013 06:50:08.188143 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a\": container with ID starting with e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a not found: ID does not exist" containerID="e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a"
Oct 13 06:50:08 crc kubenswrapper[4664]: I1013 06:50:08.188170 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a"} err="failed to get container status \"e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a\": rpc error: code = NotFound desc = could not find container \"e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a\": container with ID starting with e53901789dcd8f28b5442a0f9badff18aa3d92708c1ffad30c0952bc8868d44a not found: ID does not exist"
Oct 13 06:50:09 crc kubenswrapper[4664]: I1013 06:50:09.054257 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" path="/var/lib/kubelet/pods/83ca1b4e-cbbb-43eb-9d12-853db933995f/volumes"
Oct 13 06:50:09 crc kubenswrapper[4664]: I1013 06:50:09.688056 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:50:09 crc kubenswrapper[4664]: I1013 06:50:09.688897 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z6s2t" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="registry-server" containerID="cri-o://5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0" gracePeriod=2
Oct 13 06:50:09 crc kubenswrapper[4664]: I1013 06:50:09.886541 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:50:09 crc kubenswrapper[4664]: I1013 06:50:09.886822 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zzcvr" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="registry-server" containerID="cri-o://cc0ad7148e94f61e46d1f12e22b888c56ee95190294eb7ba05e63b0f6b52c226" gracePeriod=2
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.033784 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.127789 4664 generic.go:334] "Generic (PLEG): container finished" podID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerID="cc0ad7148e94f61e46d1f12e22b888c56ee95190294eb7ba05e63b0f6b52c226" exitCode=0
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.127947 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerDied","Data":"cc0ad7148e94f61e46d1f12e22b888c56ee95190294eb7ba05e63b0f6b52c226"}
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.130295 4664 generic.go:334] "Generic (PLEG): container finished" podID="8980bd64-f06f-424e-91cb-41b402bfc136" containerID="5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0" exitCode=0
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.130321 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerDied","Data":"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"}
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.130375 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6s2t" event={"ID":"8980bd64-f06f-424e-91cb-41b402bfc136","Type":"ContainerDied","Data":"dfd94b6bce92702a3851f56ce5b235901fd92c6d33dd4f83d9abd7fc81a3b5cc"}
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.130413 4664 scope.go:117] "RemoveContainer" containerID="5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.130486 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6s2t"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.152392 4664 scope.go:117] "RemoveContainer" containerID="428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.170659 4664 scope.go:117] "RemoveContainer" containerID="1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.190251 4664 scope.go:117] "RemoveContainer" containerID="5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.190541 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities\") pod \"8980bd64-f06f-424e-91cb-41b402bfc136\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.190603 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content\") pod \"8980bd64-f06f-424e-91cb-41b402bfc136\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.190635 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nt6s\" (UniqueName: \"kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s\") pod \"8980bd64-f06f-424e-91cb-41b402bfc136\" (UID: \"8980bd64-f06f-424e-91cb-41b402bfc136\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: E1013 06:50:10.193993 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0\": container with ID starting with 5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0 not found: ID does not exist" containerID="5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194029 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0"} err="failed to get container status \"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0\": rpc error: code = NotFound desc = could not find container \"5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0\": container with ID starting with 5cbf2295d9b7f6d1de61e7abff2769f1c4d103f2b391b255db5f27be8ad95ae0 not found: ID does not exist"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194058 4664 scope.go:117] "RemoveContainer" containerID="428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194183 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities" (OuterVolumeSpecName: "utilities") pod "8980bd64-f06f-424e-91cb-41b402bfc136" (UID: "8980bd64-f06f-424e-91cb-41b402bfc136"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: E1013 06:50:10.194361 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7\": container with ID starting with 428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7 not found: ID does not exist" containerID="428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194406 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7"} err="failed to get container status \"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7\": rpc error: code = NotFound desc = could not find container \"428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7\": container with ID starting with 428ec656d38879054028799d141ddacebcd8df79fb658da133b81e23676efbc7 not found: ID does not exist"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194425 4664 scope.go:117] "RemoveContainer" containerID="1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78"
Oct 13 06:50:10 crc kubenswrapper[4664]: E1013 06:50:10.194692 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78\": container with ID starting with 1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78 not found: ID does not exist" containerID="1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.194709 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78"} err="failed to get container status \"1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78\": rpc error: code = NotFound desc = could not find container \"1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78\": container with ID starting with 1a9a2817e95a35c70eb8e896334cec5b11aa38d822f785b596a328ff1c4fda78 not found: ID does not exist"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.197955 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s" (OuterVolumeSpecName: "kube-api-access-6nt6s") pod "8980bd64-f06f-424e-91cb-41b402bfc136" (UID: "8980bd64-f06f-424e-91cb-41b402bfc136"). InnerVolumeSpecName "kube-api-access-6nt6s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.205033 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8980bd64-f06f-424e-91cb-41b402bfc136" (UID: "8980bd64-f06f-424e-91cb-41b402bfc136"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.234475 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.292832 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-utilities\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.292875 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8980bd64-f06f-424e-91cb-41b402bfc136-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.292894 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nt6s\" (UniqueName: \"kubernetes.io/projected/8980bd64-f06f-424e-91cb-41b402bfc136-kube-api-access-6nt6s\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.394133 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2vrt\" (UniqueName: \"kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt\") pod \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.394278 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities\") pod \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.394314 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content\") pod \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\" (UID: \"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d\") "
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.394986 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities" (OuterVolumeSpecName: "utilities") pod "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" (UID: "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.396558 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt" (OuterVolumeSpecName: "kube-api-access-t2vrt") pod "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" (UID: "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d"). InnerVolumeSpecName "kube-api-access-t2vrt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.455595 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.463972 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6s2t"]
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.485955 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" (UID: "8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.496111 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2vrt\" (UniqueName: \"kubernetes.io/projected/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-kube-api-access-t2vrt\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.496136 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-utilities\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:10 crc kubenswrapper[4664]: I1013 06:50:10.496146 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.054365 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" path="/var/lib/kubelet/pods/8980bd64-f06f-424e-91cb-41b402bfc136/volumes"
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.139928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzcvr" event={"ID":"8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d","Type":"ContainerDied","Data":"cd4d8f24a7cc0fcd9be4662c6dc8055a192cb5f9f48e4407058b8bdbf407356c"}
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.139985 4664 scope.go:117] "RemoveContainer" containerID="cc0ad7148e94f61e46d1f12e22b888c56ee95190294eb7ba05e63b0f6b52c226"
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.140039 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzcvr"
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.159236 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.164009 4664 scope.go:117] "RemoveContainer" containerID="06048aff85826171e7ae9179060f0097ed1c6b7a7e63b96b6ecba4cdc7fb3313"
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.165629 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zzcvr"]
Oct 13 06:50:11 crc kubenswrapper[4664]: I1013 06:50:11.183941 4664 scope.go:117] "RemoveContainer" containerID="000b03179a1bb2a998efe577d3f6f8b54067897ef5d9823b63141076ef035c40"
Oct 13 06:50:13 crc kubenswrapper[4664]: I1013 06:50:13.055109 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" path="/var/lib/kubelet/pods/8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d/volumes"
Oct 13 06:50:14 crc kubenswrapper[4664]: I1013 06:50:14.540220 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"]
Oct 13 06:50:39 crc kubenswrapper[4664]: I1013 06:50:39.576474 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift" containerID="cri-o://89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d" gracePeriod=15
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.011674 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj"
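[Editor's note: "Cleaned up orphaned pod volumes dir" is the kubelet's housekeeping for pods deleted from the API: once every volume is unmounted and detached, the per-pod directory under /var/lib/kubelet/pods/<podUID>/volumes can be removed. A sketch that checks whether such a directory is empty; illustrative only, since the real logic in kubelet_volumes.go also consults the volume manager before pruning anything.]

```go
// orphanvolumes.go - check whether a pod's volumes dir is empty, the
// precondition behind the "Cleaned up orphaned pod volumes dir" message.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	podUID := "8980bd64-f06f-424e-91cb-41b402bfc136" // from the log above
	dir := filepath.Join("/var/lib/kubelet/pods", podUID, "volumes")

	entries, err := os.ReadDir(dir)
	if os.IsNotExist(err) {
		fmt.Println("already cleaned up:", dir)
		return
	} else if err != nil {
		panic(err)
	}

	// Each entry is a plugin dir (e.g. kubernetes.io~empty-dir); look one
	// level down to see whether any volume is still present there.
	remaining := 0
	for _, plugin := range entries {
		vols, _ := os.ReadDir(filepath.Join(dir, plugin.Name()))
		remaining += len(vols)
	}
	if remaining == 0 {
		fmt.Println("safe to prune:", dir)
	} else {
		fmt.Printf("%d volume(s) still present under %s\n", remaining, dir)
	}
}
```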
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053435 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm"]
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053702 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" containerName="pruner"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053719 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" containerName="pruner"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053785 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053823 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053836 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053845 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053859 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053869 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053904 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053914 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053923 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053930 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053938 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053944 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053952 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.053982 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="extract-utilities"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.053995 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054003 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054016 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054023 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054032 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054064 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054075 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054083 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054093 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e493fe25-4de0-4078-9994-4732e9c9ad80" containerName="pruner"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054100 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e493fe25-4de0-4078-9994-4732e9c9ad80" containerName="pruner"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054113 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054143 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="extract-content"
Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.054156 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054162 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054359 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="8980bd64-f06f-424e-91cb-41b402bfc136" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054423 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerName="oauth-openshift"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054433 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e493fe25-4de0-4078-9994-4732e9c9ad80" containerName="pruner"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054441 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="83ca1b4e-cbbb-43eb-9d12-853db933995f" containerName="registry-server"
Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054448 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" containerName="pruner"
"RemoveStaleState removing state" podUID="efe10b4e-7e51-429a-8b11-0d4a4e14c6ad" containerName="pruner" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054480 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ce6d91a-62f4-4f5e-8551-9bcb526f0d5d" containerName="registry-server" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054489 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a5c1831-9145-4fd7-943b-c49bd95edf2d" containerName="registry-server" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.054990 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.070571 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm"] Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093588 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093674 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093772 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093825 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093866 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093932 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.093991 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094016 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094074 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094103 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094144 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094171 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc 
kubenswrapper[4664]: I1013 06:50:40.094194 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7kmv\" (UniqueName: \"kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094248 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094309 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error\") pod \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\" (UID: \"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8\") " Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094550 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094643 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094724 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094745 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094790 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggbwx\" (UniqueName: \"kubernetes.io/projected/8d816f6a-321a-498a-a01d-0e156c69b4a1-kube-api-access-ggbwx\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094844 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094902 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094937 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094977 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-dir\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095052 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095078 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095103 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: 
I1013 06:50:40.095147 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095456 4664 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-dir\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.094972 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095067 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.095872 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.099488 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.105724 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.109916 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.110598 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.111449 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.116575 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.116787 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv" (OuterVolumeSpecName: "kube-api-access-h7kmv") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "kube-api-access-h7kmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.121700 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.122586 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.124976 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" (UID: "8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.196727 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197368 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197412 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197444 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-dir\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197470 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197491 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197523 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197552 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 
06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197577 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197606 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197634 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197652 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197674 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggbwx\" (UniqueName: \"kubernetes.io/projected/8d816f6a-321a-498a-a01d-0e156c69b4a1-kube-api-access-ggbwx\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197694 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.197779 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.198028 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.199006 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-service-ca\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.199163 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-dir\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.199373 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.199855 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.200782 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-session\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201323 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d816f6a-321a-498a-a01d-0e156c69b4a1-audit-policies\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201361 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201699 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201719 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201732 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 
13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201750 4664 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201766 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201783 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201827 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7kmv\" (UniqueName: \"kubernetes.io/projected/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-kube-api-access-h7kmv\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201844 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201859 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201871 4664 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.201826 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.202294 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-login\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.202407 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.203244 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-user-template-error\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.203508 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.203973 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.204952 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d816f6a-321a-498a-a01d-0e156c69b4a1-v4-0-config-system-router-certs\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.224408 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggbwx\" (UniqueName: \"kubernetes.io/projected/8d816f6a-321a-498a-a01d-0e156c69b4a1-kube-api-access-ggbwx\") pod \"oauth-openshift-6fff5dcfd9-rpdgm\" (UID: \"8d816f6a-321a-498a-a01d-0e156c69b4a1\") " pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.332886 4664 generic.go:334] "Generic (PLEG): container finished" podID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" containerID="89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d" exitCode=0 Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.332989 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.333017 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" event={"ID":"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8","Type":"ContainerDied","Data":"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d"} Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.333078 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-47mrj" event={"ID":"8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8","Type":"ContainerDied","Data":"ba4e883aaaf164e6d6f0ccdbc17bcb7f50b769863a5e418b2e6fa7fa7d2cc38b"} Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.333103 4664 scope.go:117] "RemoveContainer" containerID="89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.368344 4664 scope.go:117] "RemoveContainer" containerID="89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d" Oct 13 06:50:40 crc kubenswrapper[4664]: E1013 06:50:40.368832 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d\": container with ID starting with 89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d not found: ID does not exist" containerID="89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.368880 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d"} err="failed to get container status \"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d\": rpc error: code = NotFound desc = could not find container \"89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d\": container with ID starting with 89c8d58c661506f530e16557caf6fdf31b54fe4a8d55106c07f11dbe3b31955d not found: ID does not exist" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.372489 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"] Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.374950 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-47mrj"] Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.381528 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:40 crc kubenswrapper[4664]: I1013 06:50:40.810229 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm"] Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.057323 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8" path="/var/lib/kubelet/pods/8d6a1df0-eb1e-4f97-8fca-1e5ef42538b8/volumes" Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.346193 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" event={"ID":"8d816f6a-321a-498a-a01d-0e156c69b4a1","Type":"ContainerStarted","Data":"f91e5cd8608543258f8cfb67e529651e03a24e1a5451ea212f0171b0310c68d8"} Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.346277 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" event={"ID":"8d816f6a-321a-498a-a01d-0e156c69b4a1","Type":"ContainerStarted","Data":"3762bd7e360f5617163d89d2f6f921d936da3539de5a820759ec9abced5d1762"} Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.346563 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.374335 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podStartSLOduration=27.374301708 podStartE2EDuration="27.374301708s" podCreationTimestamp="2025-10-13 06:50:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:50:41.367980894 +0000 UTC m=+249.055426146" watchObservedRunningTime="2025-10-13 06:50:41.374301708 +0000 UTC m=+249.061746940" Oct 13 06:50:41 crc kubenswrapper[4664]: I1013 06:50:41.500999 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.958945 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5htx8"] Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.961190 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5htx8" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="registry-server" containerID="cri-o://4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5" gracePeriod=30 Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.982839 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d6645"] Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.983206 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d6645" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="registry-server" containerID="cri-o://8f10e627404a78f985ed34c424c6c8c6bb0109a3864d012f909714c526e1885e" gracePeriod=30 Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.994321 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"] Oct 13 06:50:57 crc kubenswrapper[4664]: I1013 06:50:57.995174 4664 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" containerID="cri-o://7afcbc51532b7e7ccdcc08045df6b3107a6beb1907967517e9a12d3ce8a3175e" gracePeriod=30 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.004965 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.005265 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6gqwj" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="registry-server" containerID="cri-o://283e8af63a7c3cf26dc264cf564226b73ed4e6371edfb71bb0a618f6fdfce5ed" gracePeriod=30 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.023238 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.023560 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cwwpm" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="registry-server" containerID="cri-o://324432bac4b4d13e00f35bf94e3d32028183d8be9dde8bcc48543f919d11273a" gracePeriod=30 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.027240 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rhmxl"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.035399 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.039382 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rhmxl"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.170303 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.170650 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.170899 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsjl6\" (UniqueName: \"kubernetes.io/projected/acb034b5-2645-458a-91ae-14c42b6632b2-kube-api-access-tsjl6\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.276498 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.276572 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.278391 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsjl6\" (UniqueName: \"kubernetes.io/projected/acb034b5-2645-458a-91ae-14c42b6632b2-kube-api-access-tsjl6\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.279398 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.301415 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/acb034b5-2645-458a-91ae-14c42b6632b2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.311316 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsjl6\" (UniqueName: \"kubernetes.io/projected/acb034b5-2645-458a-91ae-14c42b6632b2-kube-api-access-tsjl6\") pod \"marketplace-operator-79b997595-rhmxl\" (UID: \"acb034b5-2645-458a-91ae-14c42b6632b2\") " pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.353368 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.398359 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.456016 4664 generic.go:334] "Generic (PLEG): container finished" podID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerID="8f10e627404a78f985ed34c424c6c8c6bb0109a3864d012f909714c526e1885e" exitCode=0 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.456441 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerDied","Data":"8f10e627404a78f985ed34c424c6c8c6bb0109a3864d012f909714c526e1885e"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.464460 4664 generic.go:334] "Generic (PLEG): container finished" podID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerID="324432bac4b4d13e00f35bf94e3d32028183d8be9dde8bcc48543f919d11273a" exitCode=0 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.464526 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerDied","Data":"324432bac4b4d13e00f35bf94e3d32028183d8be9dde8bcc48543f919d11273a"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.469763 4664 generic.go:334] "Generic (PLEG): container finished" podID="e478ea82-bff7-4555-9004-1252b36a6a77" containerID="283e8af63a7c3cf26dc264cf564226b73ed4e6371edfb71bb0a618f6fdfce5ed" exitCode=0 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.469874 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerDied","Data":"283e8af63a7c3cf26dc264cf564226b73ed4e6371edfb71bb0a618f6fdfce5ed"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.471983 4664 generic.go:334] "Generic (PLEG): container finished" podID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerID="7afcbc51532b7e7ccdcc08045df6b3107a6beb1907967517e9a12d3ce8a3175e" exitCode=0 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.472032 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" event={"ID":"5bf1cd76-69a8-496f-a6a9-1adc0022d829","Type":"ContainerDied","Data":"7afcbc51532b7e7ccdcc08045df6b3107a6beb1907967517e9a12d3ce8a3175e"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.474206 4664 generic.go:334] "Generic (PLEG): container finished" podID="2767d696-0a63-452a-931a-2634decc57d1" containerID="4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5" exitCode=0 Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.474232 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerDied","Data":"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.474251 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5htx8" event={"ID":"2767d696-0a63-452a-931a-2634decc57d1","Type":"ContainerDied","Data":"e6801dfb9a9ef47b18b361b3b7c722d68f218d489fd7743619c14d34f6f38705"} Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.474268 4664 scope.go:117] "RemoveContainer" containerID="4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.474428 4664 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5htx8" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.484619 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content\") pod \"2767d696-0a63-452a-931a-2634decc57d1\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.485486 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities\") pod \"2767d696-0a63-452a-931a-2634decc57d1\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.485593 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw2kr\" (UniqueName: \"kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr\") pod \"2767d696-0a63-452a-931a-2634decc57d1\" (UID: \"2767d696-0a63-452a-931a-2634decc57d1\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.489120 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities" (OuterVolumeSpecName: "utilities") pod "2767d696-0a63-452a-931a-2634decc57d1" (UID: "2767d696-0a63-452a-931a-2634decc57d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.490432 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr" (OuterVolumeSpecName: "kube-api-access-cw2kr") pod "2767d696-0a63-452a-931a-2634decc57d1" (UID: "2767d696-0a63-452a-931a-2634decc57d1"). InnerVolumeSpecName "kube-api-access-cw2kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.525855 4664 scope.go:117] "RemoveContainer" containerID="5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.567266 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2767d696-0a63-452a-931a-2634decc57d1" (UID: "2767d696-0a63-452a-931a-2634decc57d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.577343 4664 scope.go:117] "RemoveContainer" containerID="c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.587739 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.587765 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2767d696-0a63-452a-931a-2634decc57d1-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.587776 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw2kr\" (UniqueName: \"kubernetes.io/projected/2767d696-0a63-452a-931a-2634decc57d1-kube-api-access-cw2kr\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.605899 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.617056 4664 scope.go:117] "RemoveContainer" containerID="4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5" Oct 13 06:50:58 crc kubenswrapper[4664]: E1013 06:50:58.618433 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5\": container with ID starting with 4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5 not found: ID does not exist" containerID="4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.618479 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5"} err="failed to get container status \"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5\": rpc error: code = NotFound desc = could not find container \"4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5\": container with ID starting with 4a112d7788e7c11a27be9016dec2d739773ef8d492c81feb1c2c0c72294ae9e5 not found: ID does not exist" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.618508 4664 scope.go:117] "RemoveContainer" containerID="5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9" Oct 13 06:50:58 crc kubenswrapper[4664]: E1013 06:50:58.618834 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9\": container with ID starting with 5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9 not found: ID does not exist" containerID="5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.618860 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9"} err="failed to get container status \"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9\": rpc error: code = NotFound desc = could not find container 
\"5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9\": container with ID starting with 5d389c2602d2482bfcd45b04c93507922a3b5e171e6787e426f22060ce8d80b9 not found: ID does not exist" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.618874 4664 scope.go:117] "RemoveContainer" containerID="c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529" Oct 13 06:50:58 crc kubenswrapper[4664]: E1013 06:50:58.619192 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529\": container with ID starting with c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529 not found: ID does not exist" containerID="c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.619211 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529"} err="failed to get container status \"c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529\": rpc error: code = NotFound desc = could not find container \"c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529\": container with ID starting with c241e7c4b90b6c39c38f31418c952d03135d6a0b13a878c9fb0c43c25214f529 not found: ID does not exist" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.648617 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.657266 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwwpm" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.688266 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca\") pod \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.688387 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8s7fv\" (UniqueName: \"kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv\") pod \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.688462 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics\") pod \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\" (UID: \"5bf1cd76-69a8-496f-a6a9-1adc0022d829\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.690811 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "5bf1cd76-69a8-496f-a6a9-1adc0022d829" (UID: "5bf1cd76-69a8-496f-a6a9-1adc0022d829"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.698343 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "5bf1cd76-69a8-496f-a6a9-1adc0022d829" (UID: "5bf1cd76-69a8-496f-a6a9-1adc0022d829"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.701284 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv" (OuterVolumeSpecName: "kube-api-access-8s7fv") pod "5bf1cd76-69a8-496f-a6a9-1adc0022d829" (UID: "5bf1cd76-69a8-496f-a6a9-1adc0022d829"). InnerVolumeSpecName "kube-api-access-8s7fv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.789441 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities\") pod \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790116 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s42nk\" (UniqueName: \"kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk\") pod \"e478ea82-bff7-4555-9004-1252b36a6a77\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790174 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp5gk\" (UniqueName: \"kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk\") pod \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790225 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities\") pod \"e478ea82-bff7-4555-9004-1252b36a6a77\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790247 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content\") pod \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\" (UID: \"9283a98e-b18c-45bf-9f1c-3d2dac8b372e\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790248 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities" (OuterVolumeSpecName: "utilities") pod "9283a98e-b18c-45bf-9f1c-3d2dac8b372e" (UID: "9283a98e-b18c-45bf-9f1c-3d2dac8b372e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790276 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content\") pod \"e478ea82-bff7-4555-9004-1252b36a6a77\" (UID: \"e478ea82-bff7-4555-9004-1252b36a6a77\") " Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790780 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790814 4664 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790828 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8s7fv\" (UniqueName: \"kubernetes.io/projected/5bf1cd76-69a8-496f-a6a9-1adc0022d829-kube-api-access-8s7fv\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.790840 4664 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bf1cd76-69a8-496f-a6a9-1adc0022d829-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.792322 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities" (OuterVolumeSpecName: "utilities") pod "e478ea82-bff7-4555-9004-1252b36a6a77" (UID: "e478ea82-bff7-4555-9004-1252b36a6a77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.795596 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk" (OuterVolumeSpecName: "kube-api-access-s42nk") pod "e478ea82-bff7-4555-9004-1252b36a6a77" (UID: "e478ea82-bff7-4555-9004-1252b36a6a77"). InnerVolumeSpecName "kube-api-access-s42nk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.802079 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk" (OuterVolumeSpecName: "kube-api-access-wp5gk") pod "9283a98e-b18c-45bf-9f1c-3d2dac8b372e" (UID: "9283a98e-b18c-45bf-9f1c-3d2dac8b372e"). InnerVolumeSpecName "kube-api-access-wp5gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.810905 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e478ea82-bff7-4555-9004-1252b36a6a77" (UID: "e478ea82-bff7-4555-9004-1252b36a6a77"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.815652 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5htx8"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.818919 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5htx8"] Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.889620 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9283a98e-b18c-45bf-9f1c-3d2dac8b372e" (UID: "9283a98e-b18c-45bf-9f1c-3d2dac8b372e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.892495 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s42nk\" (UniqueName: \"kubernetes.io/projected/e478ea82-bff7-4555-9004-1252b36a6a77-kube-api-access-s42nk\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.892544 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp5gk\" (UniqueName: \"kubernetes.io/projected/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-kube-api-access-wp5gk\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.892558 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.892571 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9283a98e-b18c-45bf-9f1c-3d2dac8b372e-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.892581 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e478ea82-bff7-4555-9004-1252b36a6a77-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:58 crc kubenswrapper[4664]: I1013 06:50:58.928471 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rhmxl"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.036362 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.054874 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2767d696-0a63-452a-931a-2634decc57d1" path="/var/lib/kubelet/pods/2767d696-0a63-452a-931a-2634decc57d1/volumes" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.197241 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkcxt\" (UniqueName: \"kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt\") pod \"b874d683-d666-4c72-8a82-7bfb88b53abd\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.197633 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities\") pod \"b874d683-d666-4c72-8a82-7bfb88b53abd\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.197858 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content\") pod \"b874d683-d666-4c72-8a82-7bfb88b53abd\" (UID: \"b874d683-d666-4c72-8a82-7bfb88b53abd\") " Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.199903 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities" (OuterVolumeSpecName: "utilities") pod "b874d683-d666-4c72-8a82-7bfb88b53abd" (UID: "b874d683-d666-4c72-8a82-7bfb88b53abd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.203175 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt" (OuterVolumeSpecName: "kube-api-access-nkcxt") pod "b874d683-d666-4c72-8a82-7bfb88b53abd" (UID: "b874d683-d666-4c72-8a82-7bfb88b53abd"). InnerVolumeSpecName "kube-api-access-nkcxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.259866 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b874d683-d666-4c72-8a82-7bfb88b53abd" (UID: "b874d683-d666-4c72-8a82-7bfb88b53abd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.299638 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkcxt\" (UniqueName: \"kubernetes.io/projected/b874d683-d666-4c72-8a82-7bfb88b53abd-kube-api-access-nkcxt\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.299689 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.299699 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b874d683-d666-4c72-8a82-7bfb88b53abd-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.482852 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" event={"ID":"acb034b5-2645-458a-91ae-14c42b6632b2","Type":"ContainerStarted","Data":"301c9bb0172896021d914312cce1f39c9f8ffc636dd0657b98264ff9b1e69139"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.483546 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.483669 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" event={"ID":"acb034b5-2645-458a-91ae-14c42b6632b2","Type":"ContainerStarted","Data":"c8bc3da950195452ba48d6d771882adee31f3710ac049baad3d64400436f7e0c"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.485556 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d6645" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.485501 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rhmxl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.485745 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podUID="acb034b5-2645-458a-91ae-14c42b6632b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.485548 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6645" event={"ID":"b874d683-d666-4c72-8a82-7bfb88b53abd","Type":"ContainerDied","Data":"9597d917f2f70ac3a1f9138194e14f878788871af8ce1e1c0181f22cbdba20a9"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.485949 4664 scope.go:117] "RemoveContainer" containerID="8f10e627404a78f985ed34c424c6c8c6bb0109a3864d012f909714c526e1885e" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.488658 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwwpm" event={"ID":"9283a98e-b18c-45bf-9f1c-3d2dac8b372e","Type":"ContainerDied","Data":"7101a953f91dd6f98b533d560e3844f610c1e74e99e5b16d946bd0c8c7f2c85a"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.488672 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwwpm" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.492914 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gqwj" event={"ID":"e478ea82-bff7-4555-9004-1252b36a6a77","Type":"ContainerDied","Data":"c0403a6fded88996dc8077b8fd0f3cd46f6b4132473aa4fcf29d56be304f8d74"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.493212 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gqwj" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.495847 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" event={"ID":"5bf1cd76-69a8-496f-a6a9-1adc0022d829","Type":"ContainerDied","Data":"00b43b6962d46f49494882708c2859ea764b4b5d3680cb457d8c3428ef289abf"} Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.495958 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-p6fvb" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.506691 4664 scope.go:117] "RemoveContainer" containerID="036f5897382688c55519b614452bc9a3edfcfade0dc7a436d2097455783ee3af" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.524166 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podStartSLOduration=1.524142342 podStartE2EDuration="1.524142342s" podCreationTimestamp="2025-10-13 06:50:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:50:59.522664207 +0000 UTC m=+267.210109409" watchObservedRunningTime="2025-10-13 06:50:59.524142342 +0000 UTC m=+267.211587534" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.537429 4664 scope.go:117] "RemoveContainer" containerID="30246692bf98161378afc2133edf374dd79e2976f16eb04aa7a8660a14f4a90f" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.560413 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.560703 4664 scope.go:117] "RemoveContainer" containerID="324432bac4b4d13e00f35bf94e3d32028183d8be9dde8bcc48543f919d11273a" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.588437 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gqwj"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.597410 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.608386 4664 scope.go:117] "RemoveContainer" containerID="675bbbd47d2d3ed138471c13fc8b1a5ef0f0d5f18fae58d8070fd2e2b4a92638" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.613103 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cwwpm"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.620897 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.631019 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-p6fvb"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.634553 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d6645"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.636384 4664 scope.go:117] "RemoveContainer" containerID="e5e467b349faba6f7a21940a28d9c49dabb123ddc30cf05998c81bc4c1557389" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.638202 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d6645"] Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.668666 4664 scope.go:117] "RemoveContainer" containerID="283e8af63a7c3cf26dc264cf564226b73ed4e6371edfb71bb0a618f6fdfce5ed" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.696938 4664 scope.go:117] "RemoveContainer" containerID="51fd7d3c3ce81e6ea55418e95eb8617717ee339fc71c7cba8260090894536683" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 06:50:59.714581 4664 scope.go:117] "RemoveContainer" containerID="b59321754374f22b65fed80d8cef6ec8eede92ea3919d2084bb6f98b8b126564" Oct 13 06:50:59 crc kubenswrapper[4664]: I1013 
06:50:59.725903 4664 scope.go:117] "RemoveContainer" containerID="7afcbc51532b7e7ccdcc08045df6b3107a6beb1907967517e9a12d3ce8a3175e" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.182805 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5b7pw"] Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.184798 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.184922 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.185022 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.185114 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.185219 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.185300 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.185388 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.185474 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.185557 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.188410 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.188574 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.188670 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.188748 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.188858 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.188958 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.189049 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 
06:51:00.189152 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.189232 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.189311 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.189400 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="extract-content" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.189486 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.189566 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.189729 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.189851 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="extract-utilities" Oct 13 06:51:00 crc kubenswrapper[4664]: E1013 06:51:00.189951 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190035 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190292 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190390 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" containerName="marketplace-operator" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190477 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190567 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2767d696-0a63-452a-931a-2634decc57d1" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.190688 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" containerName="registry-server" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.191916 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.195931 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5b7pw"] Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.199629 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.322738 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8hxh\" (UniqueName: \"kubernetes.io/projected/41825a43-78e2-42f0-aec8-2778276d69d8-kube-api-access-l8hxh\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.322800 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-catalog-content\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.322916 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-utilities\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.385265 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.386526 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.388777 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.398130 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.425840 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-utilities\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.425925 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8hxh\" (UniqueName: \"kubernetes.io/projected/41825a43-78e2-42f0-aec8-2778276d69d8-kube-api-access-l8hxh\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.425955 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-catalog-content\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.426708 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-catalog-content\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.427143 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41825a43-78e2-42f0-aec8-2778276d69d8-utilities\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.449215 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8hxh\" (UniqueName: \"kubernetes.io/projected/41825a43-78e2-42f0-aec8-2778276d69d8-kube-api-access-l8hxh\") pod \"redhat-marketplace-5b7pw\" (UID: \"41825a43-78e2-42f0-aec8-2778276d69d8\") " pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.512402 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.522848 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.531159 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.531287 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.531325 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25pjj\" (UniqueName: \"kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.632872 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.632914 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25pjj\" (UniqueName: \"kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.632961 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.633829 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.634660 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content\") pod \"redhat-operators-9hkbn\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.657667 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25pjj\" (UniqueName: \"kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj\") pod \"redhat-operators-9hkbn\" (UID: 
\"06d97f83-9d40-4f8e-bf46-46275670fa82\") " pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.709199 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.924038 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 06:51:00 crc kubenswrapper[4664]: W1013 06:51:00.933107 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06d97f83_9d40_4f8e_bf46_46275670fa82.slice/crio-92e85322d0b772b29d1b7f5e349b4702b6cd8bf795b6cc0607417a84f730614f WatchSource:0}: Error finding container 92e85322d0b772b29d1b7f5e349b4702b6cd8bf795b6cc0607417a84f730614f: Status 404 returned error can't find the container with id 92e85322d0b772b29d1b7f5e349b4702b6cd8bf795b6cc0607417a84f730614f Oct 13 06:51:00 crc kubenswrapper[4664]: I1013 06:51:00.970395 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5b7pw"] Oct 13 06:51:00 crc kubenswrapper[4664]: W1013 06:51:00.976433 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41825a43_78e2_42f0_aec8_2778276d69d8.slice/crio-5f86ef1e54aa257e85346cae8023c12aaa43474ffc2f62ed8d4e97e6eba3f9e2 WatchSource:0}: Error finding container 5f86ef1e54aa257e85346cae8023c12aaa43474ffc2f62ed8d4e97e6eba3f9e2: Status 404 returned error can't find the container with id 5f86ef1e54aa257e85346cae8023c12aaa43474ffc2f62ed8d4e97e6eba3f9e2 Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.060893 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bf1cd76-69a8-496f-a6a9-1adc0022d829" path="/var/lib/kubelet/pods/5bf1cd76-69a8-496f-a6a9-1adc0022d829/volumes" Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.062274 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9283a98e-b18c-45bf-9f1c-3d2dac8b372e" path="/var/lib/kubelet/pods/9283a98e-b18c-45bf-9f1c-3d2dac8b372e/volumes" Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.062880 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b874d683-d666-4c72-8a82-7bfb88b53abd" path="/var/lib/kubelet/pods/b874d683-d666-4c72-8a82-7bfb88b53abd/volumes" Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.063501 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e478ea82-bff7-4555-9004-1252b36a6a77" path="/var/lib/kubelet/pods/e478ea82-bff7-4555-9004-1252b36a6a77/volumes" Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.519117 4664 generic.go:334] "Generic (PLEG): container finished" podID="41825a43-78e2-42f0-aec8-2778276d69d8" containerID="68186925beca4fe5ea4ba2ef0a5edb5e91b6cd1322142557affd51fb559eeab4" exitCode=0 Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.519217 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerDied","Data":"68186925beca4fe5ea4ba2ef0a5edb5e91b6cd1322142557affd51fb559eeab4"} Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.519329 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" 
event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerStarted","Data":"5f86ef1e54aa257e85346cae8023c12aaa43474ffc2f62ed8d4e97e6eba3f9e2"} Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.523015 4664 generic.go:334] "Generic (PLEG): container finished" podID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerID="ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256" exitCode=0 Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.523907 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerDied","Data":"ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256"} Oct 13 06:51:01 crc kubenswrapper[4664]: I1013 06:51:01.523979 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerStarted","Data":"92e85322d0b772b29d1b7f5e349b4702b6cd8bf795b6cc0607417a84f730614f"} Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.581172 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bbwgq"] Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.586309 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.591542 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.593734 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bbwgq"] Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.670932 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-catalog-content\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.671056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf4t8\" (UniqueName: \"kubernetes.io/projected/eef97848-b083-4ac5-a9bd-5b8f047b420b-kube-api-access-pf4t8\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.671084 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-utilities\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.777024 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf4t8\" (UniqueName: \"kubernetes.io/projected/eef97848-b083-4ac5-a9bd-5b8f047b420b-kube-api-access-pf4t8\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.777096 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-utilities\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.777142 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-catalog-content\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.779307 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-catalog-content\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.781671 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eef97848-b083-4ac5-a9bd-5b8f047b420b-utilities\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.794513 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.796086 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.798995 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.812659 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.823194 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf4t8\" (UniqueName: \"kubernetes.io/projected/eef97848-b083-4ac5-a9bd-5b8f047b420b-kube-api-access-pf4t8\") pod \"certified-operators-bbwgq\" (UID: \"eef97848-b083-4ac5-a9bd-5b8f047b420b\") " pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.878159 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpdhq\" (UniqueName: \"kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.878536 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.878650 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.915279 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.980262 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpdhq\" (UniqueName: \"kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.980350 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.980366 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.981273 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:02 crc kubenswrapper[4664]: I1013 06:51:02.982819 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.002332 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpdhq\" (UniqueName: \"kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq\") pod \"community-operators-vdjxj\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.191420 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.411028 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bbwgq"] Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.537297 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bbwgq" event={"ID":"eef97848-b083-4ac5-a9bd-5b8f047b420b","Type":"ContainerStarted","Data":"65729e34849cc5c008ad83439e9a7818b6a1e18326b05b9522c7c34f9ee0335b"} Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.540025 4664 generic.go:334] "Generic (PLEG): container finished" podID="41825a43-78e2-42f0-aec8-2778276d69d8" containerID="4482b8f3cc84eec949956d14b2c0120024f383f33b2a3d8f38f800a3542b22a6" exitCode=0 Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.540091 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerDied","Data":"4482b8f3cc84eec949956d14b2c0120024f383f33b2a3d8f38f800a3542b22a6"} Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.544784 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerDied","Data":"68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0"} Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.544702 4664 generic.go:334] "Generic (PLEG): container finished" podID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerID="68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0" exitCode=0 Oct 13 06:51:03 crc kubenswrapper[4664]: I1013 06:51:03.662852 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 06:51:03 crc kubenswrapper[4664]: W1013 06:51:03.673350 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod389566e0_cafb_4bd2_aa8b_76efccfa1048.slice/crio-e1e51707cfa66cfc058bc6d176e005c4ec6eff45013e3e5c459fa4f2f99dec3a WatchSource:0}: Error finding container e1e51707cfa66cfc058bc6d176e005c4ec6eff45013e3e5c459fa4f2f99dec3a: Status 404 returned error can't find the container with id e1e51707cfa66cfc058bc6d176e005c4ec6eff45013e3e5c459fa4f2f99dec3a Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.555126 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerStarted","Data":"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3"} Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.560172 4664 generic.go:334] "Generic (PLEG): container finished" podID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerID="157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37" exitCode=0 Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.560303 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerDied","Data":"157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37"} Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.560342 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" 
event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerStarted","Data":"e1e51707cfa66cfc058bc6d176e005c4ec6eff45013e3e5c459fa4f2f99dec3a"} Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.571489 4664 generic.go:334] "Generic (PLEG): container finished" podID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerID="2aad066a975156aa6ff61651446b853c97f6ea0422e022c95d6aa9f292d1194f" exitCode=0 Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.571557 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bbwgq" event={"ID":"eef97848-b083-4ac5-a9bd-5b8f047b420b","Type":"ContainerDied","Data":"2aad066a975156aa6ff61651446b853c97f6ea0422e022c95d6aa9f292d1194f"} Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.574521 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerStarted","Data":"77f2d054cda76f67f0a1af0675e2ac31270a35a2e02034d1c4b70c08f9602154"} Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.579016 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9hkbn" podStartSLOduration=2.118965829 podStartE2EDuration="4.578992234s" podCreationTimestamp="2025-10-13 06:51:00 +0000 UTC" firstStartedPulling="2025-10-13 06:51:01.527867214 +0000 UTC m=+269.215312436" lastFinishedPulling="2025-10-13 06:51:03.987893649 +0000 UTC m=+271.675338841" observedRunningTime="2025-10-13 06:51:04.578249917 +0000 UTC m=+272.265695109" watchObservedRunningTime="2025-10-13 06:51:04.578992234 +0000 UTC m=+272.266437426" Oct 13 06:51:04 crc kubenswrapper[4664]: I1013 06:51:04.637890 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5b7pw" podStartSLOduration=2.146571492 podStartE2EDuration="4.637858288s" podCreationTimestamp="2025-10-13 06:51:00 +0000 UTC" firstStartedPulling="2025-10-13 06:51:01.521967814 +0000 UTC m=+269.209413016" lastFinishedPulling="2025-10-13 06:51:04.01325462 +0000 UTC m=+271.700699812" observedRunningTime="2025-10-13 06:51:04.635694927 +0000 UTC m=+272.323140139" watchObservedRunningTime="2025-10-13 06:51:04.637858288 +0000 UTC m=+272.325303490" Oct 13 06:51:05 crc kubenswrapper[4664]: I1013 06:51:05.583928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerStarted","Data":"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873"} Oct 13 06:51:05 crc kubenswrapper[4664]: I1013 06:51:05.589006 4664 generic.go:334] "Generic (PLEG): container finished" podID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerID="1d24a8cb53e2fa7028d9ee6d91f81736038c836715bee2acf5f6312cdaeb81ba" exitCode=0 Oct 13 06:51:05 crc kubenswrapper[4664]: I1013 06:51:05.589922 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bbwgq" event={"ID":"eef97848-b083-4ac5-a9bd-5b8f047b420b","Type":"ContainerDied","Data":"1d24a8cb53e2fa7028d9ee6d91f81736038c836715bee2acf5f6312cdaeb81ba"} Oct 13 06:51:06 crc kubenswrapper[4664]: I1013 06:51:06.597774 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bbwgq" event={"ID":"eef97848-b083-4ac5-a9bd-5b8f047b420b","Type":"ContainerStarted","Data":"4860a5b53cb1f5a1e22c2200b57c7606e7685c0d5200a3938a82281c8aba8585"} Oct 13 06:51:06 crc 
kubenswrapper[4664]: I1013 06:51:06.604955 4664 generic.go:334] "Generic (PLEG): container finished" podID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerID="a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873" exitCode=0 Oct 13 06:51:06 crc kubenswrapper[4664]: I1013 06:51:06.605025 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerDied","Data":"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873"} Oct 13 06:51:06 crc kubenswrapper[4664]: I1013 06:51:06.624667 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bbwgq" podStartSLOduration=2.993317765 podStartE2EDuration="4.624638369s" podCreationTimestamp="2025-10-13 06:51:02 +0000 UTC" firstStartedPulling="2025-10-13 06:51:04.573856114 +0000 UTC m=+272.261301306" lastFinishedPulling="2025-10-13 06:51:06.205176718 +0000 UTC m=+273.892621910" observedRunningTime="2025-10-13 06:51:06.622257213 +0000 UTC m=+274.309702425" watchObservedRunningTime="2025-10-13 06:51:06.624638369 +0000 UTC m=+274.312083561" Oct 13 06:51:08 crc kubenswrapper[4664]: I1013 06:51:08.644122 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerStarted","Data":"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9"} Oct 13 06:51:08 crc kubenswrapper[4664]: I1013 06:51:08.671255 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vdjxj" podStartSLOduration=3.972639203 podStartE2EDuration="6.671232056s" podCreationTimestamp="2025-10-13 06:51:02 +0000 UTC" firstStartedPulling="2025-10-13 06:51:04.570162696 +0000 UTC m=+272.257607888" lastFinishedPulling="2025-10-13 06:51:07.268755549 +0000 UTC m=+274.956200741" observedRunningTime="2025-10-13 06:51:08.668339497 +0000 UTC m=+276.355784709" watchObservedRunningTime="2025-10-13 06:51:08.671232056 +0000 UTC m=+276.358677238" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.523159 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.523717 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.589650 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.710275 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.712925 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.717533 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 06:51:10 crc kubenswrapper[4664]: I1013 06:51:10.786063 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:11 crc kubenswrapper[4664]: I1013 06:51:11.712272 4664 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 06:51:12 crc kubenswrapper[4664]: I1013 06:51:12.916319 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:12 crc kubenswrapper[4664]: I1013 06:51:12.917030 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:12 crc kubenswrapper[4664]: I1013 06:51:12.958974 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:13 crc kubenswrapper[4664]: I1013 06:51:13.192447 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:13 crc kubenswrapper[4664]: I1013 06:51:13.192508 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:13 crc kubenswrapper[4664]: I1013 06:51:13.232104 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:51:13 crc kubenswrapper[4664]: I1013 06:51:13.737591 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bbwgq" Oct 13 06:51:13 crc kubenswrapper[4664]: I1013 06:51:13.746251 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 06:52:28 crc kubenswrapper[4664]: I1013 06:52:28.812586 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:52:28 crc kubenswrapper[4664]: I1013 06:52:28.813496 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:52:58 crc kubenswrapper[4664]: I1013 06:52:58.812169 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:52:58 crc kubenswrapper[4664]: I1013 06:52:58.813130 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:53:28 crc kubenswrapper[4664]: I1013 06:53:28.812499 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:53:28 crc kubenswrapper[4664]: I1013 06:53:28.813270 4664 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:53:28 crc kubenswrapper[4664]: I1013 06:53:28.813348 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:53:28 crc kubenswrapper[4664]: I1013 06:53:28.814250 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 06:53:28 crc kubenswrapper[4664]: I1013 06:53:28.814317 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2" gracePeriod=600 Oct 13 06:53:29 crc kubenswrapper[4664]: I1013 06:53:29.719645 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2" exitCode=0 Oct 13 06:53:29 crc kubenswrapper[4664]: I1013 06:53:29.719748 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2"} Oct 13 06:53:29 crc kubenswrapper[4664]: I1013 06:53:29.720570 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948"} Oct 13 06:53:29 crc kubenswrapper[4664]: I1013 06:53:29.720606 4664 scope.go:117] "RemoveContainer" containerID="d7b29182b344187d2446bcb18b710fbbe1df2e5d97ab526276b03fbf9fde38ab" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.428115 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pn4nx"] Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.429568 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.447517 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pn4nx"] Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528034 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lldjg\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-kube-api-access-lldjg\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528476 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528527 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528569 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-bound-sa-token\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528617 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-tls\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528657 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-trusted-ca\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528680 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.528723 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-certificates\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.554504 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630208 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lldjg\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-kube-api-access-lldjg\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630272 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630306 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-bound-sa-token\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630347 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-tls\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630384 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-trusted-ca\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630407 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.630453 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-certificates\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.631235 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.631892 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-certificates\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.634872 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-trusted-ca\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.638746 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-registry-tls\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.646418 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.649050 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lldjg\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-kube-api-access-lldjg\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.650658 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f7e8f602-61a5-41da-8f9f-16fd34cd5e97-bound-sa-token\") pod \"image-registry-66df7c8f76-pn4nx\" (UID: \"f7e8f602-61a5-41da-8f9f-16fd34cd5e97\") " pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.748413 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:46 crc kubenswrapper[4664]: I1013 06:54:46.980146 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pn4nx"] Oct 13 06:54:47 crc kubenswrapper[4664]: I1013 06:54:47.291345 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" event={"ID":"f7e8f602-61a5-41da-8f9f-16fd34cd5e97","Type":"ContainerStarted","Data":"e08c82cec1e00936bce6d92ff9d01533587a8e93e65f233303cddbc0cdd1efc6"} Oct 13 06:54:47 crc kubenswrapper[4664]: I1013 06:54:47.291692 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" event={"ID":"f7e8f602-61a5-41da-8f9f-16fd34cd5e97","Type":"ContainerStarted","Data":"a49637db0f96ca89653fe14d4538d608abd612084db5c42c853cfa14f5ae6ba1"} Oct 13 06:54:47 crc kubenswrapper[4664]: I1013 06:54:47.291784 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:54:47 crc kubenswrapper[4664]: I1013 06:54:47.322848 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" podStartSLOduration=1.322550115 podStartE2EDuration="1.322550115s" podCreationTimestamp="2025-10-13 06:54:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:54:47.31749637 +0000 UTC m=+495.004941592" watchObservedRunningTime="2025-10-13 06:54:47.322550115 +0000 UTC m=+495.009995307" Oct 13 06:55:06 crc kubenswrapper[4664]: I1013 06:55:06.762907 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-pn4nx" Oct 13 06:55:06 crc kubenswrapper[4664]: I1013 06:55:06.862318 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"] Oct 13 06:55:31 crc kubenswrapper[4664]: I1013 06:55:31.919459 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" podUID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" containerName="registry" containerID="cri-o://eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f" gracePeriod=30 Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.334264 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389448 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389659 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389705 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389758 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389791 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fgxc\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389862 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389911 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.389953 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates\") pod \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\" (UID: \"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6\") " Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.391669 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.391693 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.397297 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.398259 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.399195 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.400595 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc" (OuterVolumeSpecName: "kube-api-access-9fgxc") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "kube-api-access-9fgxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.401914 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.408731 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" (UID: "82fc333c-d337-4fdb-8fd7-3dc9cd303ff6"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491843 4664 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491895 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fgxc\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-kube-api-access-9fgxc\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491908 4664 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491919 4664 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491931 4664 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491940 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.491949 4664 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.623764 4664 generic.go:334] "Generic (PLEG): container finished" podID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" containerID="eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f" exitCode=0 Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.623863 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" event={"ID":"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6","Type":"ContainerDied","Data":"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f"} Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.623926 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.623955 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tfpbq" event={"ID":"82fc333c-d337-4fdb-8fd7-3dc9cd303ff6","Type":"ContainerDied","Data":"0f177798c2cbb74b6479796f1bec2c0b77c41193fb03e86591d54be05f8167dc"} Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.623992 4664 scope.go:117] "RemoveContainer" containerID="eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.658262 4664 scope.go:117] "RemoveContainer" containerID="eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f" Oct 13 06:55:32 crc kubenswrapper[4664]: E1013 06:55:32.660033 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f\": container with ID starting with eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f not found: ID does not exist" containerID="eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.660093 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f"} err="failed to get container status \"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f\": rpc error: code = NotFound desc = could not find container \"eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f\": container with ID starting with eaa3b0d4941d800cd43b495cf6d6a48a0c57cdede95fac70e6ae0e916fe2818f not found: ID does not exist" Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.670779 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"] Oct 13 06:55:32 crc kubenswrapper[4664]: I1013 06:55:32.678823 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tfpbq"] Oct 13 06:55:33 crc kubenswrapper[4664]: I1013 06:55:33.060062 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" path="/var/lib/kubelet/pods/82fc333c-d337-4fdb-8fd7-3dc9cd303ff6/volumes" Oct 13 06:55:33 crc kubenswrapper[4664]: I1013 06:55:33.276932 4664 scope.go:117] "RemoveContainer" containerID="011a1cc93e4b8354930e88dafdb970e13943a506cff93d406866b38c1be199de" Oct 13 06:55:58 crc kubenswrapper[4664]: I1013 06:55:58.811951 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:55:58 crc kubenswrapper[4664]: I1013 06:55:58.812998 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.802726 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nd2p7"] Oct 
13 06:56:08 crc kubenswrapper[4664]: E1013 06:56:08.803680 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" containerName="registry" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.803697 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" containerName="registry" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.803842 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="82fc333c-d337-4fdb-8fd7-3dc9cd303ff6" containerName="registry" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.804396 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.806471 4664 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-hzfd9" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.807595 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.810013 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.811742 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-ggcmz"] Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.812637 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-ggcmz" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.814547 4664 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-6l2fs" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.824549 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nd2p7"] Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.835878 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-5p4jw"] Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.836629 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.842166 4664 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lhgll" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.856401 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-ggcmz"] Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.874879 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rjd4\" (UniqueName: \"kubernetes.io/projected/201a4602-9190-430c-b6c4-52c0b6584883-kube-api-access-9rjd4\") pod \"cert-manager-cainjector-7f985d654d-nd2p7\" (UID: \"201a4602-9190-430c-b6c4-52c0b6584883\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.874933 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxvs5\" (UniqueName: \"kubernetes.io/projected/1979a4d1-7c0f-4eee-960e-66863300377d-kube-api-access-qxvs5\") pod \"cert-manager-5b446d88c5-ggcmz\" (UID: \"1979a4d1-7c0f-4eee-960e-66863300377d\") " pod="cert-manager/cert-manager-5b446d88c5-ggcmz" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.874982 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mqzq\" (UniqueName: \"kubernetes.io/projected/4f2dd003-654f-4e3d-9fb9-cbea80c68acd-kube-api-access-8mqzq\") pod \"cert-manager-webhook-5655c58dd6-5p4jw\" (UID: \"4f2dd003-654f-4e3d-9fb9-cbea80c68acd\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.904002 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-5p4jw"] Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.975929 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mqzq\" (UniqueName: \"kubernetes.io/projected/4f2dd003-654f-4e3d-9fb9-cbea80c68acd-kube-api-access-8mqzq\") pod \"cert-manager-webhook-5655c58dd6-5p4jw\" (UID: \"4f2dd003-654f-4e3d-9fb9-cbea80c68acd\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.975996 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rjd4\" (UniqueName: \"kubernetes.io/projected/201a4602-9190-430c-b6c4-52c0b6584883-kube-api-access-9rjd4\") pod \"cert-manager-cainjector-7f985d654d-nd2p7\" (UID: \"201a4602-9190-430c-b6c4-52c0b6584883\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.976033 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxvs5\" (UniqueName: \"kubernetes.io/projected/1979a4d1-7c0f-4eee-960e-66863300377d-kube-api-access-qxvs5\") pod \"cert-manager-5b446d88c5-ggcmz\" (UID: \"1979a4d1-7c0f-4eee-960e-66863300377d\") " pod="cert-manager/cert-manager-5b446d88c5-ggcmz" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.995958 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mqzq\" (UniqueName: \"kubernetes.io/projected/4f2dd003-654f-4e3d-9fb9-cbea80c68acd-kube-api-access-8mqzq\") pod \"cert-manager-webhook-5655c58dd6-5p4jw\" (UID: \"4f2dd003-654f-4e3d-9fb9-cbea80c68acd\") " 
pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.996084 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rjd4\" (UniqueName: \"kubernetes.io/projected/201a4602-9190-430c-b6c4-52c0b6584883-kube-api-access-9rjd4\") pod \"cert-manager-cainjector-7f985d654d-nd2p7\" (UID: \"201a4602-9190-430c-b6c4-52c0b6584883\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" Oct 13 06:56:08 crc kubenswrapper[4664]: I1013 06:56:08.997635 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxvs5\" (UniqueName: \"kubernetes.io/projected/1979a4d1-7c0f-4eee-960e-66863300377d-kube-api-access-qxvs5\") pod \"cert-manager-5b446d88c5-ggcmz\" (UID: \"1979a4d1-7c0f-4eee-960e-66863300377d\") " pod="cert-manager/cert-manager-5b446d88c5-ggcmz" Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.131593 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.143555 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-ggcmz" Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.156158 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.609972 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-ggcmz"] Oct 13 06:56:09 crc kubenswrapper[4664]: W1013 06:56:09.620025 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1979a4d1_7c0f_4eee_960e_66863300377d.slice/crio-7dcc532aac0c67c793bc124f935837153a780c735e6ebbf5db91c2d41db17a1b WatchSource:0}: Error finding container 7dcc532aac0c67c793bc124f935837153a780c735e6ebbf5db91c2d41db17a1b: Status 404 returned error can't find the container with id 7dcc532aac0c67c793bc124f935837153a780c735e6ebbf5db91c2d41db17a1b Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.622320 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.656110 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-5p4jw"] Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.658557 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nd2p7"] Oct 13 06:56:09 crc kubenswrapper[4664]: W1013 06:56:09.667071 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod201a4602_9190_430c_b6c4_52c0b6584883.slice/crio-6f8d63131040539c97f171b6cc62c938528fb0f8395917cfa1d8b7a1944e5a76 WatchSource:0}: Error finding container 6f8d63131040539c97f171b6cc62c938528fb0f8395917cfa1d8b7a1944e5a76: Status 404 returned error can't find the container with id 6f8d63131040539c97f171b6cc62c938528fb0f8395917cfa1d8b7a1944e5a76 Oct 13 06:56:09 crc kubenswrapper[4664]: W1013 06:56:09.670457 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f2dd003_654f_4e3d_9fb9_cbea80c68acd.slice/crio-5495ad04ad6b69fa7ca86a5f3b22d6e5924b15cd1ea991c2d955c988635a10a9 
WatchSource:0}: Error finding container 5495ad04ad6b69fa7ca86a5f3b22d6e5924b15cd1ea991c2d955c988635a10a9: Status 404 returned error can't find the container with id 5495ad04ad6b69fa7ca86a5f3b22d6e5924b15cd1ea991c2d955c988635a10a9 Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.891953 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" event={"ID":"4f2dd003-654f-4e3d-9fb9-cbea80c68acd","Type":"ContainerStarted","Data":"5495ad04ad6b69fa7ca86a5f3b22d6e5924b15cd1ea991c2d955c988635a10a9"} Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.893262 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-ggcmz" event={"ID":"1979a4d1-7c0f-4eee-960e-66863300377d","Type":"ContainerStarted","Data":"7dcc532aac0c67c793bc124f935837153a780c735e6ebbf5db91c2d41db17a1b"} Oct 13 06:56:09 crc kubenswrapper[4664]: I1013 06:56:09.894395 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" event={"ID":"201a4602-9190-430c-b6c4-52c0b6584883","Type":"ContainerStarted","Data":"6f8d63131040539c97f171b6cc62c938528fb0f8395917cfa1d8b7a1944e5a76"} Oct 13 06:56:12 crc kubenswrapper[4664]: I1013 06:56:12.926440 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-ggcmz" event={"ID":"1979a4d1-7c0f-4eee-960e-66863300377d","Type":"ContainerStarted","Data":"97485f725f95b8f89482ccdc6f08c19d1450b02d0966fdd0755a3660bfd44292"} Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.071534 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-ggcmz" podStartSLOduration=1.999038572 podStartE2EDuration="5.071499536s" podCreationTimestamp="2025-10-13 06:56:08 +0000 UTC" firstStartedPulling="2025-10-13 06:56:09.62188693 +0000 UTC m=+577.309332132" lastFinishedPulling="2025-10-13 06:56:12.694347904 +0000 UTC m=+580.381793096" observedRunningTime="2025-10-13 06:56:12.949293822 +0000 UTC m=+580.636739034" watchObservedRunningTime="2025-10-13 06:56:13.071499536 +0000 UTC m=+580.758944748" Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.935241 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" event={"ID":"4f2dd003-654f-4e3d-9fb9-cbea80c68acd","Type":"ContainerStarted","Data":"f972a48565e418ca4f7b61a2fabded181dd2fdae199fc6672f8c3fac38991072"} Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.935949 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.938424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" event={"ID":"201a4602-9190-430c-b6c4-52c0b6584883","Type":"ContainerStarted","Data":"fc8182ebb14b7cfb4469877e05434923f310de47138da6ac5ffb3956e6dc36e3"} Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.959063 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podStartSLOduration=2.9277239269999997 podStartE2EDuration="5.959030626s" podCreationTimestamp="2025-10-13 06:56:08 +0000 UTC" firstStartedPulling="2025-10-13 06:56:09.672098227 +0000 UTC m=+577.359543429" lastFinishedPulling="2025-10-13 06:56:12.703404926 +0000 UTC m=+580.390850128" observedRunningTime="2025-10-13 06:56:13.957553737 +0000 UTC m=+581.644998989" 
watchObservedRunningTime="2025-10-13 06:56:13.959030626 +0000 UTC m=+581.646475828" Oct 13 06:56:13 crc kubenswrapper[4664]: I1013 06:56:13.984558 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-nd2p7" podStartSLOduration=2.873786541 podStartE2EDuration="5.984533956s" podCreationTimestamp="2025-10-13 06:56:08 +0000 UTC" firstStartedPulling="2025-10-13 06:56:09.669645441 +0000 UTC m=+577.357090633" lastFinishedPulling="2025-10-13 06:56:12.780392856 +0000 UTC m=+580.467838048" observedRunningTime="2025-10-13 06:56:13.981638938 +0000 UTC m=+581.669084140" watchObservedRunningTime="2025-10-13 06:56:13.984533956 +0000 UTC m=+581.671979148" Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.160428 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.392623 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mjr5r"] Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393492 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-controller" containerID="cri-o://a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393559 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="sbdb" containerID="cri-o://a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393646 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393682 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-node" containerID="cri-o://777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393784 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="northd" containerID="cri-o://1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393878 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-acl-logging" containerID="cri-o://0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.393826 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="nbdb" 
containerID="cri-o://ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.485861 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" containerID="cri-o://9a5f3e1290eef021a5e3fc50abae0cb852921600c76ca982f2e44128e9c91671" gracePeriod=30 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.991362 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/2.log" Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.992554 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/1.log" Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.992622 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" containerID="9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5" exitCode=2 Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.992717 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerDied","Data":"9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5"} Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.992768 4664 scope.go:117] "RemoveContainer" containerID="2f8510ef09dfc8086488d20b2486db8bf036209ea48e85ad8c3f582f9057cb88" Oct 13 06:56:19 crc kubenswrapper[4664]: I1013 06:56:19.993721 4664 scope.go:117] "RemoveContainer" containerID="9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5" Oct 13 06:56:19 crc kubenswrapper[4664]: E1013 06:56:19.994286 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bg4kt_openshift-multus(2f22066f-5783-48bc-85f8-0fbb2eed7e0b)\"" pod="openshift-multus/multus-bg4kt" podUID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.001046 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovnkube-controller/3.log" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.003308 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-acl-logging/0.log" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.003789 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-controller/0.log" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004286 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="9a5f3e1290eef021a5e3fc50abae0cb852921600c76ca982f2e44128e9c91671" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004329 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004346 4664 generic.go:334] "Generic (PLEG): container finished" 
podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004340 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"9a5f3e1290eef021a5e3fc50abae0cb852921600c76ca982f2e44128e9c91671"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004421 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004439 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004453 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004361 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004488 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004514 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6" exitCode=0 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004525 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220" exitCode=143 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004536 4664 generic.go:334] "Generic (PLEG): container finished" podID="74eb7029-982d-4294-bed0-63ffe7281479" containerID="a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989" exitCode=143 Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004555 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004568 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004585 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" 
event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.004597 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989"} Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.028117 4664 scope.go:117] "RemoveContainer" containerID="eb9cd188e7af67b033aa88ec3d7f16fb35a3cf3c13212e1a56fffb08445d8da0" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.257686 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-acl-logging/0.log" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.258459 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-controller/0.log" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.259650 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334048 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gfjdn"] Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334330 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="northd" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334348 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="northd" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334365 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-ovn-metrics" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334374 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-ovn-metrics" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334382 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334390 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334399 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334407 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334417 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="nbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334427 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="nbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334435 4664 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-node" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334443 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-node" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334455 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334463 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334471 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334479 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334488 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334495 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334508 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-acl-logging" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334516 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-acl-logging" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334528 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="sbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334537 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="sbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334545 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kubecfg-setup" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334554 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kubecfg-setup" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334672 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-ovn-metrics" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334685 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334694 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-acl-logging" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334703 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 
06:56:20.334712 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334722 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="nbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334731 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="kube-rbac-proxy-node" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334740 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="sbdb" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334749 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334760 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovn-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334769 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="northd" Oct 13 06:56:20 crc kubenswrapper[4664]: E1013 06:56:20.334898 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.334908 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.335040 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="74eb7029-982d-4294-bed0-63ffe7281479" containerName="ovnkube-controller" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.336860 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.339674 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.339927 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.339991 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340018 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340038 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340071 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340058 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340093 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340145 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340200 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340256 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knvjm\" (UniqueName: \"kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340278 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340290 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket" (OuterVolumeSpecName: "log-socket") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340315 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340325 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340347 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340362 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340375 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340394 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340411 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340452 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340485 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340503 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340536 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340576 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd\") pod \"74eb7029-982d-4294-bed0-63ffe7281479\" (UID: \"74eb7029-982d-4294-bed0-63ffe7281479\") " Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.340760 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341107 4664 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341122 4664 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-run-netns\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341135 4664 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341146 4664 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341159 4664 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341191 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341223 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341244 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash" (OuterVolumeSpecName: "host-slash") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341263 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log" (OuterVolumeSpecName: "node-log") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341284 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341411 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341428 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341456 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341476 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341644 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.341772 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.349704 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm" (OuterVolumeSpecName: "kube-api-access-knvjm") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "kube-api-access-knvjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.351107 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.362358 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "74eb7029-982d-4294-bed0-63ffe7281479" (UID: "74eb7029-982d-4294-bed0-63ffe7281479"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.441927 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-slash\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.441992 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmlx8\" (UniqueName: \"kubernetes.io/projected/fdab5d83-a4da-441f-9ca3-7d73c6696277-kube-api-access-xmlx8\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442022 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-systemd-units\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442214 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-config\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442350 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442440 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovn-node-metrics-cert\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442501 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-netns\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442543 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-ovn\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442579 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-systemd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442620 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-var-lib-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442647 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-netd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442676 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-script-lib\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442708 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-etc-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442758 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-env-overrides\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442834 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-bin\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.442973 4664 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-node-log\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443034 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443066 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-kubelet\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443103 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443145 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-log-socket\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443254 4664 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-systemd\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443272 4664 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-log-socket\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443282 4664 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443294 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knvjm\" (UniqueName: \"kubernetes.io/projected/74eb7029-982d-4294-bed0-63ffe7281479-kube-api-access-knvjm\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443306 4664 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443316 4664 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443325 4664 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-slash\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443333 4664 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-node-log\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443341 4664 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-systemd-units\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443351 4664 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/74eb7029-982d-4294-bed0-63ffe7281479-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443359 4664 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-bin\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443370 4664 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443379 4664 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-kubelet\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443390 4664 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/74eb7029-982d-4294-bed0-63ffe7281479-host-cni-netd\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.443400 4664 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/74eb7029-982d-4294-bed0-63ffe7281479-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545626 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545691 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-kubelet\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545722 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545751 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-log-socket\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545790 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-slash\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545839 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmlx8\" (UniqueName: \"kubernetes.io/projected/fdab5d83-a4da-441f-9ca3-7d73c6696277-kube-api-access-xmlx8\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545860 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-systemd-units\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545895 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-config\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545950 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.545988 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovn-node-metrics-cert\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546022 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-netns\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546050 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-ovn\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546080 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-systemd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546112 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-netd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546156 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-script-lib\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546184 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-var-lib-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546216 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-etc-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546248 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-env-overrides\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546282 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-bin\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546327 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-node-log\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546429 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-node-log\") pod \"ovnkube-node-gfjdn\" (UID: 
\"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546495 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546534 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-kubelet\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546574 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546616 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-log-socket\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.546653 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-slash\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.547090 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-systemd-units\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.547953 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-netd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.547981 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-config\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548127 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-ovn\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 
06:56:20.548140 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548167 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-run-systemd\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548180 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-cni-bin\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548209 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-host-run-netns\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548231 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-var-lib-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548316 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/fdab5d83-a4da-441f-9ca3-7d73c6696277-etc-openvswitch\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548861 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-env-overrides\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.548935 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovnkube-script-lib\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.558110 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/fdab5d83-a4da-441f-9ca3-7d73c6696277-ovn-node-metrics-cert\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.569079 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmlx8\" (UniqueName: 
\"kubernetes.io/projected/fdab5d83-a4da-441f-9ca3-7d73c6696277-kube-api-access-xmlx8\") pod \"ovnkube-node-gfjdn\" (UID: \"fdab5d83-a4da-441f-9ca3-7d73c6696277\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:20 crc kubenswrapper[4664]: I1013 06:56:20.673781 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.017272 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-acl-logging/0.log" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.018291 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mjr5r_74eb7029-982d-4294-bed0-63ffe7281479/ovn-controller/0.log" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.018746 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" event={"ID":"74eb7029-982d-4294-bed0-63ffe7281479","Type":"ContainerDied","Data":"f989b2b6944d3f4d874741060aa5479bbc22ba41ddedf3aeb9a5d8862358a48b"} Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.018808 4664 scope.go:117] "RemoveContainer" containerID="9a5f3e1290eef021a5e3fc50abae0cb852921600c76ca982f2e44128e9c91671" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.018922 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mjr5r" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.021316 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/2.log" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.025615 4664 generic.go:334] "Generic (PLEG): container finished" podID="fdab5d83-a4da-441f-9ca3-7d73c6696277" containerID="aeb173db161582edb1ac963fd17c436292964a08f933bec3d0bf98ba43aba194" exitCode=0 Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.025687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerDied","Data":"aeb173db161582edb1ac963fd17c436292964a08f933bec3d0bf98ba43aba194"} Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.025785 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"1d570cc191bce897dad6826f142392954952cc6b2ec846e1e9c994c7921f1000"} Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.048779 4664 scope.go:117] "RemoveContainer" containerID="a96e71c7baebe1c6c450a25416faaee5867675c8b8931ee3044bc718f0812485" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.089051 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mjr5r"] Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.094196 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mjr5r"] Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.099773 4664 scope.go:117] "RemoveContainer" containerID="ef7e330f461ad667628fc5e91c6ccdb09c63d1e226035bf67b9e235fbeb677d5" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.125557 4664 scope.go:117] "RemoveContainer" containerID="1698428abf1b5ee014967cabe8e3a95748157a4ec7939eb1b27ac002619d4ebe" Oct 13 06:56:21 
crc kubenswrapper[4664]: I1013 06:56:21.145168 4664 scope.go:117] "RemoveContainer" containerID="fed0f5c15cdc3449c7be2d801bc53067c6e87eb99f74a40552b500c711b8ce5c" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.175413 4664 scope.go:117] "RemoveContainer" containerID="777e32af96ea9cdb96a4c4e682589197ad2da09a3d022d0bd6afc40e4cec8dc6" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.194423 4664 scope.go:117] "RemoveContainer" containerID="0271d2b371e53d601b059ecd96d5e3ed0c83ca96047bbde7e9e0bc1090ea7220" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.219345 4664 scope.go:117] "RemoveContainer" containerID="a6036c5ecc8404263d8bc6c5ab8dc6745f9cb416bb0fcc8796ce4f35c7f65989" Oct 13 06:56:21 crc kubenswrapper[4664]: I1013 06:56:21.242367 4664 scope.go:117] "RemoveContainer" containerID="715da4d4108e55c5c65acb785f666bbca371552bff89d768e9c814b6d718e801" Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035359 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"df940d23485a35c7f4ce85c1324d512ebc0e1fb674f240ff13b34bb81a32121e"} Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035834 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"cd868af75ad4ca88a3a08906bfebfac34604338257a9ba5404b0b56e374b7884"} Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035854 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"951c8b9f6fc69586da670aee4a1f3779cae348be96771f0efa1594b7ea1a2f0d"} Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035866 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"0ee6f5aa82939aebef16330c12328ce2757b4696fc1ee082beaa3ef96e0e5ffe"} Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035889 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"f71f750fdfb33a2a3f0fbd49619c0057345b3217d7c9dffd87e15f0eb3eeda5b"} Oct 13 06:56:22 crc kubenswrapper[4664]: I1013 06:56:22.035902 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"a0b7b4af1b400043aa3647e39eb2d0727bac3861f5bf9310f339abc3f44d0071"} Oct 13 06:56:23 crc kubenswrapper[4664]: I1013 06:56:23.058302 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74eb7029-982d-4294-bed0-63ffe7281479" path="/var/lib/kubelet/pods/74eb7029-982d-4294-bed0-63ffe7281479/volumes" Oct 13 06:56:25 crc kubenswrapper[4664]: I1013 06:56:25.065485 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"dfe540b4a0a81ce59d91abd5a0736ea4f5678fcd9fc8dbd2df82d6fe7280e18b"} Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.094916 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" 
event={"ID":"fdab5d83-a4da-441f-9ca3-7d73c6696277","Type":"ContainerStarted","Data":"1fcaa62d3d466d37089209da925781fde4ac63345faba0c4d0ac4e7943205ab9"} Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.095383 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.095408 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.095421 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.135734 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.145208 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" podStartSLOduration=7.145184634 podStartE2EDuration="7.145184634s" podCreationTimestamp="2025-10-13 06:56:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:56:27.134905616 +0000 UTC m=+594.822350848" watchObservedRunningTime="2025-10-13 06:56:27.145184634 +0000 UTC m=+594.832629836" Oct 13 06:56:27 crc kubenswrapper[4664]: I1013 06:56:27.147098 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:28 crc kubenswrapper[4664]: I1013 06:56:28.812524 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:56:28 crc kubenswrapper[4664]: I1013 06:56:28.812630 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:56:33 crc kubenswrapper[4664]: I1013 06:56:33.050162 4664 scope.go:117] "RemoveContainer" containerID="9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5" Oct 13 06:56:33 crc kubenswrapper[4664]: E1013 06:56:33.051336 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bg4kt_openshift-multus(2f22066f-5783-48bc-85f8-0fbb2eed7e0b)\"" pod="openshift-multus/multus-bg4kt" podUID="2f22066f-5783-48bc-85f8-0fbb2eed7e0b" Oct 13 06:56:33 crc kubenswrapper[4664]: I1013 06:56:33.320046 4664 scope.go:117] "RemoveContainer" containerID="49d5ed9eac252c054bc034083cb6c0929bc5c42a3808f466452d101f66a0ea73" Oct 13 06:56:33 crc kubenswrapper[4664]: I1013 06:56:33.346290 4664 scope.go:117] "RemoveContainer" containerID="ed604d1f8d88e1832c24808c301496cdcae6c132fcb3d58c974aa18d5635b7d3" Oct 13 06:56:48 crc kubenswrapper[4664]: I1013 06:56:48.047149 4664 scope.go:117] "RemoveContainer" containerID="9fe810f7334c79a98146a6969e8b72c587dd4a4f43578c4792c4fabb4d4334e5" Oct 13 06:56:48 crc 
kubenswrapper[4664]: I1013 06:56:48.252615 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bg4kt_2f22066f-5783-48bc-85f8-0fbb2eed7e0b/kube-multus/2.log" Oct 13 06:56:48 crc kubenswrapper[4664]: I1013 06:56:48.252710 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bg4kt" event={"ID":"2f22066f-5783-48bc-85f8-0fbb2eed7e0b","Type":"ContainerStarted","Data":"be00a8e4718d87980ca6412f8a5dce9a05e4c3aa3ada2f3758b90d3da017005a"} Oct 13 06:56:50 crc kubenswrapper[4664]: I1013 06:56:50.716338 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfjdn" Oct 13 06:56:58 crc kubenswrapper[4664]: I1013 06:56:58.812477 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:56:58 crc kubenswrapper[4664]: I1013 06:56:58.813366 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:56:58 crc kubenswrapper[4664]: I1013 06:56:58.813444 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 06:56:58 crc kubenswrapper[4664]: I1013 06:56:58.814432 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 06:56:58 crc kubenswrapper[4664]: I1013 06:56:58.814541 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948" gracePeriod=600 Oct 13 06:56:59 crc kubenswrapper[4664]: I1013 06:56:59.350092 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948" exitCode=0 Oct 13 06:56:59 crc kubenswrapper[4664]: I1013 06:56:59.350182 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948"} Oct 13 06:56:59 crc kubenswrapper[4664]: I1013 06:56:59.350515 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5"} Oct 13 06:56:59 crc kubenswrapper[4664]: I1013 06:56:59.350551 4664 scope.go:117] "RemoveContainer" 
containerID="dc7a7f9feba16667d03ca95a6bd98653a84020768447659f114a7e08147be5b2" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.057724 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz"] Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.062549 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.065737 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.076185 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz"] Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.233368 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq9j4\" (UniqueName: \"kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.233449 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.233788 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.335544 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.335691 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq9j4\" (UniqueName: \"kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.335734 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util\") pod 
\"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.336358 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.336566 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.385982 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq9j4\" (UniqueName: \"kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:02 crc kubenswrapper[4664]: I1013 06:57:02.682128 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:03 crc kubenswrapper[4664]: I1013 06:57:03.025864 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz"] Oct 13 06:57:03 crc kubenswrapper[4664]: W1013 06:57:03.029870 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod645e6031_efcb_4cdd_8758_670729f16fd2.slice/crio-46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c WatchSource:0}: Error finding container 46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c: Status 404 returned error can't find the container with id 46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c Oct 13 06:57:03 crc kubenswrapper[4664]: I1013 06:57:03.380725 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerStarted","Data":"72e0579ca0d2ca201f68600c548ca81bd2d0e131af9323d9cb675759d3e10e3e"} Oct 13 06:57:03 crc kubenswrapper[4664]: I1013 06:57:03.381258 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerStarted","Data":"46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c"} Oct 13 06:57:04 crc kubenswrapper[4664]: I1013 06:57:04.391626 4664 generic.go:334] "Generic (PLEG): container finished" podID="645e6031-efcb-4cdd-8758-670729f16fd2" containerID="72e0579ca0d2ca201f68600c548ca81bd2d0e131af9323d9cb675759d3e10e3e" exitCode=0 Oct 13 06:57:04 
crc kubenswrapper[4664]: I1013 06:57:04.391706 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerDied","Data":"72e0579ca0d2ca201f68600c548ca81bd2d0e131af9323d9cb675759d3e10e3e"} Oct 13 06:57:06 crc kubenswrapper[4664]: I1013 06:57:06.408097 4664 generic.go:334] "Generic (PLEG): container finished" podID="645e6031-efcb-4cdd-8758-670729f16fd2" containerID="8f2b3d3e27752269356977f5dfc1a81cb4933a178bc3e85edd07c805fec9a5c8" exitCode=0 Oct 13 06:57:06 crc kubenswrapper[4664]: I1013 06:57:06.408443 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerDied","Data":"8f2b3d3e27752269356977f5dfc1a81cb4933a178bc3e85edd07c805fec9a5c8"} Oct 13 06:57:07 crc kubenswrapper[4664]: I1013 06:57:07.422143 4664 generic.go:334] "Generic (PLEG): container finished" podID="645e6031-efcb-4cdd-8758-670729f16fd2" containerID="0d05652aec989ce1a701a68196a91b49d820d0c27e0bdc8840d62c9f94e7ce18" exitCode=0 Oct 13 06:57:07 crc kubenswrapper[4664]: I1013 06:57:07.422222 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerDied","Data":"0d05652aec989ce1a701a68196a91b49d820d0c27e0bdc8840d62c9f94e7ce18"} Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.687925 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.847305 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle\") pod \"645e6031-efcb-4cdd-8758-670729f16fd2\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.847404 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq9j4\" (UniqueName: \"kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4\") pod \"645e6031-efcb-4cdd-8758-670729f16fd2\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.847526 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util\") pod \"645e6031-efcb-4cdd-8758-670729f16fd2\" (UID: \"645e6031-efcb-4cdd-8758-670729f16fd2\") " Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.848942 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle" (OuterVolumeSpecName: "bundle") pod "645e6031-efcb-4cdd-8758-670729f16fd2" (UID: "645e6031-efcb-4cdd-8758-670729f16fd2"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.857707 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4" (OuterVolumeSpecName: "kube-api-access-xq9j4") pod "645e6031-efcb-4cdd-8758-670729f16fd2" (UID: "645e6031-efcb-4cdd-8758-670729f16fd2"). InnerVolumeSpecName "kube-api-access-xq9j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.949670 4664 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:08 crc kubenswrapper[4664]: I1013 06:57:08.949749 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq9j4\" (UniqueName: \"kubernetes.io/projected/645e6031-efcb-4cdd-8758-670729f16fd2-kube-api-access-xq9j4\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:09 crc kubenswrapper[4664]: I1013 06:57:09.013777 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util" (OuterVolumeSpecName: "util") pod "645e6031-efcb-4cdd-8758-670729f16fd2" (UID: "645e6031-efcb-4cdd-8758-670729f16fd2"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:57:09 crc kubenswrapper[4664]: I1013 06:57:09.050616 4664 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645e6031-efcb-4cdd-8758-670729f16fd2-util\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:09 crc kubenswrapper[4664]: I1013 06:57:09.440938 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" event={"ID":"645e6031-efcb-4cdd-8758-670729f16fd2","Type":"ContainerDied","Data":"46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c"} Oct 13 06:57:09 crc kubenswrapper[4664]: I1013 06:57:09.441012 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46126e4e5d829f9b968816db1a4bdd1f47e985cb1cc89db14fa602d9fba5d60c" Oct 13 06:57:09 crc kubenswrapper[4664]: I1013 06:57:09.441014 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.620378 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv"] Oct 13 06:57:13 crc kubenswrapper[4664]: E1013 06:57:13.621213 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="util" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.621235 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="util" Oct 13 06:57:13 crc kubenswrapper[4664]: E1013 06:57:13.621259 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="extract" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.621270 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="extract" Oct 13 06:57:13 crc kubenswrapper[4664]: E1013 06:57:13.621291 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="pull" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.621300 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="pull" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.621421 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="645e6031-efcb-4cdd-8758-670729f16fd2" containerName="extract" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.621981 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.625024 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.625097 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8px7n" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.637920 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv"] Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.639656 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.718336 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pj7m\" (UniqueName: \"kubernetes.io/projected/5e688f39-2130-46ae-8ca0-d1926e83ad24-kube-api-access-9pj7m\") pod \"nmstate-operator-858ddd8f98-fsrhv\" (UID: \"5e688f39-2130-46ae-8ca0-d1926e83ad24\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.819832 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pj7m\" (UniqueName: \"kubernetes.io/projected/5e688f39-2130-46ae-8ca0-d1926e83ad24-kube-api-access-9pj7m\") pod \"nmstate-operator-858ddd8f98-fsrhv\" (UID: \"5e688f39-2130-46ae-8ca0-d1926e83ad24\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.855497 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pj7m\" 
(UniqueName: \"kubernetes.io/projected/5e688f39-2130-46ae-8ca0-d1926e83ad24-kube-api-access-9pj7m\") pod \"nmstate-operator-858ddd8f98-fsrhv\" (UID: \"5e688f39-2130-46ae-8ca0-d1926e83ad24\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" Oct 13 06:57:13 crc kubenswrapper[4664]: I1013 06:57:13.939470 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" Oct 13 06:57:14 crc kubenswrapper[4664]: I1013 06:57:14.385668 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv"] Oct 13 06:57:14 crc kubenswrapper[4664]: I1013 06:57:14.470413 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" event={"ID":"5e688f39-2130-46ae-8ca0-d1926e83ad24","Type":"ContainerStarted","Data":"7cc2d758862f7c3d54203ea1d943188f0026444356b2b9cf5dadad40dca38650"} Oct 13 06:57:17 crc kubenswrapper[4664]: I1013 06:57:17.493586 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" event={"ID":"5e688f39-2130-46ae-8ca0-d1926e83ad24","Type":"ContainerStarted","Data":"e23f95747fbf412351a957411edf741cd5bc183ee208cd6e2b0d5ef92cf07a9e"} Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.510808 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fsrhv" podStartSLOduration=7.571448383 podStartE2EDuration="9.5107684s" podCreationTimestamp="2025-10-13 06:57:13 +0000 UTC" firstStartedPulling="2025-10-13 06:57:14.398972054 +0000 UTC m=+642.086417246" lastFinishedPulling="2025-10-13 06:57:16.338292061 +0000 UTC m=+644.025737263" observedRunningTime="2025-10-13 06:57:17.514388399 +0000 UTC m=+645.201833621" watchObservedRunningTime="2025-10-13 06:57:22.5107684 +0000 UTC m=+650.198213602" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.513135 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.514393 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.517188 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-j6bxs" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.539857 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.546692 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.547594 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.553483 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.573074 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.609911 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj5r5\" (UniqueName: \"kubernetes.io/projected/c1894568-58c9-473a-bbd8-484ed8a89a6d-kube-api-access-xj5r5\") pod \"nmstate-metrics-fdff9cb8d-rncps\" (UID: \"c1894568-58c9-473a-bbd8-484ed8a89a6d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.615773 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-khxbh"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.616533 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711413 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d5f7b13a-10cb-44b2-89b1-e434b8f81923-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711477 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-nmstate-lock\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711745 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj5r5\" (UniqueName: \"kubernetes.io/projected/c1894568-58c9-473a-bbd8-484ed8a89a6d-kube-api-access-xj5r5\") pod \"nmstate-metrics-fdff9cb8d-rncps\" (UID: \"c1894568-58c9-473a-bbd8-484ed8a89a6d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711861 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdwmh\" (UniqueName: \"kubernetes.io/projected/d5f7b13a-10cb-44b2-89b1-e434b8f81923-kube-api-access-qdwmh\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711950 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-dbus-socket\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.711995 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tngh\" (UniqueName: 
\"kubernetes.io/projected/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-kube-api-access-4tngh\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.712025 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-ovs-socket\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.747712 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.748493 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.750989 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.751058 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.751528 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-fndrd" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.755766 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj5r5\" (UniqueName: \"kubernetes.io/projected/c1894568-58c9-473a-bbd8-484ed8a89a6d-kube-api-access-xj5r5\") pod \"nmstate-metrics-fdff9cb8d-rncps\" (UID: \"c1894568-58c9-473a-bbd8-484ed8a89a6d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.773312 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813674 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdwmh\" (UniqueName: \"kubernetes.io/projected/d5f7b13a-10cb-44b2-89b1-e434b8f81923-kube-api-access-qdwmh\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813728 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-dbus-socket\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813749 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tngh\" (UniqueName: \"kubernetes.io/projected/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-kube-api-access-4tngh\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813769 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-ovs-socket\") pod 
\"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813841 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d5f7b13a-10cb-44b2-89b1-e434b8f81923-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813863 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-nmstate-lock\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813931 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-nmstate-lock\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.813981 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-ovs-socket\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.814767 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-dbus-socket\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.817331 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d5f7b13a-10cb-44b2-89b1-e434b8f81923-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.831118 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdwmh\" (UniqueName: \"kubernetes.io/projected/d5f7b13a-10cb-44b2-89b1-e434b8f81923-kube-api-access-qdwmh\") pod \"nmstate-webhook-6cdbc54649-kwpkw\" (UID: \"d5f7b13a-10cb-44b2-89b1-e434b8f81923\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.832953 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tngh\" (UniqueName: \"kubernetes.io/projected/ee1d22c6-0909-4894-aff0-cc30dcbe54a5-kube-api-access-4tngh\") pod \"nmstate-handler-khxbh\" (UID: \"ee1d22c6-0909-4894-aff0-cc30dcbe54a5\") " pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.834927 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.865713 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.915056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7586db4a-cf0c-40b1-b014-77677f118219-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.915131 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.915162 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g6hn\" (UniqueName: \"kubernetes.io/projected/7586db4a-cf0c-40b1-b014-77677f118219-kube-api-access-2g6hn\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.934278 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-54c9bfc7b6-rvkt8"] Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.934959 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.935868 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:22 crc kubenswrapper[4664]: I1013 06:57:22.949247 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54c9bfc7b6-rvkt8"] Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.016690 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7586db4a-cf0c-40b1-b014-77677f118219-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.016744 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.016771 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g6hn\" (UniqueName: \"kubernetes.io/projected/7586db4a-cf0c-40b1-b014-77677f118219-kube-api-access-2g6hn\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: E1013 06:57:23.017132 4664 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Oct 13 06:57:23 crc kubenswrapper[4664]: E1013 06:57:23.017229 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert podName:7586db4a-cf0c-40b1-b014-77677f118219 nodeName:}" failed. No retries permitted until 2025-10-13 06:57:23.517199602 +0000 UTC m=+651.204644794 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert") pod "nmstate-console-plugin-6b874cbd85-825ww" (UID: "7586db4a-cf0c-40b1-b014-77677f118219") : secret "plugin-serving-cert" not found Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.018523 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7586db4a-cf0c-40b1-b014-77677f118219-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.035525 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g6hn\" (UniqueName: \"kubernetes.io/projected/7586db4a-cf0c-40b1-b014-77677f118219-kube-api-access-2g6hn\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.117876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.117953 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t9cn\" (UniqueName: \"kubernetes.io/projected/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-kube-api-access-9t9cn\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.117981 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-service-ca\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.118032 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-trusted-ca-bundle\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.118056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-oauth-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.118141 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " 
pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.118172 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-oauth-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.157110 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps"] Oct 13 06:57:23 crc kubenswrapper[4664]: W1013 06:57:23.168773 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1894568_58c9_473a_bbd8_484ed8a89a6d.slice/crio-f793ad9ccf37bf28ea473c5c78f045550f3dc0f3583a7036bd5af7f75a46ec09 WatchSource:0}: Error finding container f793ad9ccf37bf28ea473c5c78f045550f3dc0f3583a7036bd5af7f75a46ec09: Status 404 returned error can't find the container with id f793ad9ccf37bf28ea473c5c78f045550f3dc0f3583a7036bd5af7f75a46ec09 Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.205174 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw"] Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219004 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219101 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-oauth-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219149 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219172 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t9cn\" (UniqueName: \"kubernetes.io/projected/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-kube-api-access-9t9cn\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219198 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-service-ca\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219219 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-trusted-ca-bundle\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.219235 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-oauth-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.221307 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.221434 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-service-ca\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.221919 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-trusted-ca-bundle\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.222358 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-oauth-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.230511 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-oauth-config\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.230895 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-console-serving-cert\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.242116 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t9cn\" (UniqueName: \"kubernetes.io/projected/e9fa90fd-fb25-4656-b2ee-e4788866cf6d-kube-api-access-9t9cn\") pod \"console-54c9bfc7b6-rvkt8\" (UID: \"e9fa90fd-fb25-4656-b2ee-e4788866cf6d\") " pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.278094 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.513945 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54c9bfc7b6-rvkt8"] Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.525157 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.529660 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7586db4a-cf0c-40b1-b014-77677f118219-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-825ww\" (UID: \"7586db4a-cf0c-40b1-b014-77677f118219\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.548244 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" event={"ID":"d5f7b13a-10cb-44b2-89b1-e434b8f81923","Type":"ContainerStarted","Data":"e2ff6abc9dbdd282012530cd546528f7be40b6fed21db788a255eb52febaba37"} Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.553453 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-khxbh" event={"ID":"ee1d22c6-0909-4894-aff0-cc30dcbe54a5","Type":"ContainerStarted","Data":"9d90245e20d91a9679a540d2defdd7356c696f2feaeee6fd8f3bbe822e3c1537"} Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.554584 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" event={"ID":"c1894568-58c9-473a-bbd8-484ed8a89a6d","Type":"ContainerStarted","Data":"f793ad9ccf37bf28ea473c5c78f045550f3dc0f3583a7036bd5af7f75a46ec09"} Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.555578 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54c9bfc7b6-rvkt8" event={"ID":"e9fa90fd-fb25-4656-b2ee-e4788866cf6d","Type":"ContainerStarted","Data":"e36e2d3e90b466e825ae1a27955881858f5bca16f26cb5a051e2cd0426e09fdb"} Oct 13 06:57:23 crc kubenswrapper[4664]: I1013 06:57:23.693357 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" Oct 13 06:57:24 crc kubenswrapper[4664]: I1013 06:57:24.117059 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww"] Oct 13 06:57:24 crc kubenswrapper[4664]: W1013 06:57:24.131975 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7586db4a_cf0c_40b1_b014_77677f118219.slice/crio-460cc5909e333d494f2925fad3d2a6667844a97113556e1ddaa078c36e7eb4ec WatchSource:0}: Error finding container 460cc5909e333d494f2925fad3d2a6667844a97113556e1ddaa078c36e7eb4ec: Status 404 returned error can't find the container with id 460cc5909e333d494f2925fad3d2a6667844a97113556e1ddaa078c36e7eb4ec Oct 13 06:57:24 crc kubenswrapper[4664]: I1013 06:57:24.564685 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" event={"ID":"7586db4a-cf0c-40b1-b014-77677f118219","Type":"ContainerStarted","Data":"460cc5909e333d494f2925fad3d2a6667844a97113556e1ddaa078c36e7eb4ec"} Oct 13 06:57:24 crc kubenswrapper[4664]: I1013 06:57:24.568212 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54c9bfc7b6-rvkt8" event={"ID":"e9fa90fd-fb25-4656-b2ee-e4788866cf6d","Type":"ContainerStarted","Data":"d79c2b2a80b4c01e02d166b383fd50614b27dfdb422c19e7e400db163d96a693"} Oct 13 06:57:24 crc kubenswrapper[4664]: I1013 06:57:24.592681 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-54c9bfc7b6-rvkt8" podStartSLOduration=2.592663284 podStartE2EDuration="2.592663284s" podCreationTimestamp="2025-10-13 06:57:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:57:24.589477821 +0000 UTC m=+652.276923023" watchObservedRunningTime="2025-10-13 06:57:24.592663284 +0000 UTC m=+652.280108476" Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.581718 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" event={"ID":"c1894568-58c9-473a-bbd8-484ed8a89a6d","Type":"ContainerStarted","Data":"c9ae1169e4890e81f421db810d8d93217fd6d21e1eb16bc9af9e3320de5d44c1"} Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.584005 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-khxbh" event={"ID":"ee1d22c6-0909-4894-aff0-cc30dcbe54a5","Type":"ContainerStarted","Data":"bb129f0842e56247bc220348abe932d2df0763c2a717d89955c181580e19ce6d"} Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.584180 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.588316 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" event={"ID":"d5f7b13a-10cb-44b2-89b1-e434b8f81923","Type":"ContainerStarted","Data":"193904e16d18c1977422a4e0b49f9ac17c385a61702f053fc021949e86c446f3"} Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.588494 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.608102 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-khxbh" 
podStartSLOduration=1.981760272 podStartE2EDuration="4.608073049s" podCreationTimestamp="2025-10-13 06:57:22 +0000 UTC" firstStartedPulling="2025-10-13 06:57:23.038527459 +0000 UTC m=+650.725972651" lastFinishedPulling="2025-10-13 06:57:25.664840226 +0000 UTC m=+653.352285428" observedRunningTime="2025-10-13 06:57:26.602426252 +0000 UTC m=+654.289871464" watchObservedRunningTime="2025-10-13 06:57:26.608073049 +0000 UTC m=+654.295518241" Oct 13 06:57:26 crc kubenswrapper[4664]: I1013 06:57:26.626478 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" podStartSLOduration=2.189862829 podStartE2EDuration="4.626451639s" podCreationTimestamp="2025-10-13 06:57:22 +0000 UTC" firstStartedPulling="2025-10-13 06:57:23.224771705 +0000 UTC m=+650.912216897" lastFinishedPulling="2025-10-13 06:57:25.661360495 +0000 UTC m=+653.348805707" observedRunningTime="2025-10-13 06:57:26.620709839 +0000 UTC m=+654.308155031" watchObservedRunningTime="2025-10-13 06:57:26.626451639 +0000 UTC m=+654.313896831" Oct 13 06:57:27 crc kubenswrapper[4664]: I1013 06:57:27.609427 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" event={"ID":"7586db4a-cf0c-40b1-b014-77677f118219","Type":"ContainerStarted","Data":"a59c69ab2188a28f3d54c47cfc565e78ea7d2070bab433d8afad9ea8bd5ef410"} Oct 13 06:57:27 crc kubenswrapper[4664]: I1013 06:57:27.632864 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-825ww" podStartSLOduration=3.120120135 podStartE2EDuration="5.632827493s" podCreationTimestamp="2025-10-13 06:57:22 +0000 UTC" firstStartedPulling="2025-10-13 06:57:24.143128899 +0000 UTC m=+651.830574091" lastFinishedPulling="2025-10-13 06:57:26.655836257 +0000 UTC m=+654.343281449" observedRunningTime="2025-10-13 06:57:27.627142194 +0000 UTC m=+655.314587386" watchObservedRunningTime="2025-10-13 06:57:27.632827493 +0000 UTC m=+655.320272695" Oct 13 06:57:28 crc kubenswrapper[4664]: I1013 06:57:28.619051 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" event={"ID":"c1894568-58c9-473a-bbd8-484ed8a89a6d","Type":"ContainerStarted","Data":"4e68cb89619aa53cf686cd8858362b64bba573eccaa9f53387edd469ea7b6404"} Oct 13 06:57:28 crc kubenswrapper[4664]: I1013 06:57:28.637765 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-rncps" podStartSLOduration=1.8250998 podStartE2EDuration="6.637706077s" podCreationTimestamp="2025-10-13 06:57:22 +0000 UTC" firstStartedPulling="2025-10-13 06:57:23.170774844 +0000 UTC m=+650.858220036" lastFinishedPulling="2025-10-13 06:57:27.983381121 +0000 UTC m=+655.670826313" observedRunningTime="2025-10-13 06:57:28.637040279 +0000 UTC m=+656.324485571" watchObservedRunningTime="2025-10-13 06:57:28.637706077 +0000 UTC m=+656.325151279" Oct 13 06:57:32 crc kubenswrapper[4664]: I1013 06:57:32.968072 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-khxbh" Oct 13 06:57:33 crc kubenswrapper[4664]: I1013 06:57:33.279346 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:33 crc kubenswrapper[4664]: I1013 06:57:33.279412 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:33 crc kubenswrapper[4664]: I1013 06:57:33.289860 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:33 crc kubenswrapper[4664]: I1013 06:57:33.662436 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-54c9bfc7b6-rvkt8" Oct 13 06:57:33 crc kubenswrapper[4664]: I1013 06:57:33.770889 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"] Oct 13 06:57:42 crc kubenswrapper[4664]: I1013 06:57:42.873224 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 06:57:58 crc kubenswrapper[4664]: I1013 06:57:58.825517 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-ml2tj" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" containerID="cri-o://6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc" gracePeriod=15 Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.155930 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ml2tj_fb673675-43af-441e-9cf3-f5f283ef9558/console/0.log" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.156056 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356184 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356331 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356395 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356423 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356489 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws7fp\" (UniqueName: \"kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356528 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.356560 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert\") pod \"fb673675-43af-441e-9cf3-f5f283ef9558\" (UID: \"fb673675-43af-441e-9cf3-f5f283ef9558\") " Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.357823 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca" (OuterVolumeSpecName: "service-ca") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.358487 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.359135 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config" (OuterVolumeSpecName: "console-config") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.358122 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.364600 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.365964 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp" (OuterVolumeSpecName: "kube-api-access-ws7fp") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "kube-api-access-ws7fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.374108 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fb673675-43af-441e-9cf3-f5f283ef9558" (UID: "fb673675-43af-441e-9cf3-f5f283ef9558"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.457936 4664 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.457970 4664 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.457980 4664 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.457989 4664 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-console-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.457999 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws7fp\" (UniqueName: \"kubernetes.io/projected/fb673675-43af-441e-9cf3-f5f283ef9558-kube-api-access-ws7fp\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.458009 4664 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fb673675-43af-441e-9cf3-f5f283ef9558-service-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.458019 4664 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb673675-43af-441e-9cf3-f5f283ef9558-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.879719 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ml2tj_fb673675-43af-441e-9cf3-f5f283ef9558/console/0.log" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.879851 4664 generic.go:334] "Generic (PLEG): container finished" podID="fb673675-43af-441e-9cf3-f5f283ef9558" containerID="6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc" exitCode=2 Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.879914 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ml2tj" event={"ID":"fb673675-43af-441e-9cf3-f5f283ef9558","Type":"ContainerDied","Data":"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc"} Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.879973 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ml2tj" event={"ID":"fb673675-43af-441e-9cf3-f5f283ef9558","Type":"ContainerDied","Data":"7d01b025ab7546e3bf762cbf9a56bc2b8b22cc01ae6ccd2aa2aaf7079ee08e4e"} Oct 13 06:57:59 crc 
kubenswrapper[4664]: I1013 06:57:59.880012 4664 scope.go:117] "RemoveContainer" containerID="6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.880039 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ml2tj" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.919163 4664 scope.go:117] "RemoveContainer" containerID="6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc" Oct 13 06:57:59 crc kubenswrapper[4664]: E1013 06:57:59.920299 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc\": container with ID starting with 6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc not found: ID does not exist" containerID="6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.920358 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc"} err="failed to get container status \"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc\": rpc error: code = NotFound desc = could not find container \"6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc\": container with ID starting with 6663c44023601bda5ed1347841de40f07a521491bf0333eb45262a038cace9bc not found: ID does not exist" Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.941644 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"] Oct 13 06:57:59 crc kubenswrapper[4664]: I1013 06:57:59.949697 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-ml2tj"] Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.069260 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2"] Oct 13 06:58:00 crc kubenswrapper[4664]: E1013 06:58:00.069638 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.069668 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.069872 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" containerName="console" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.071246 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.073673 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.084604 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2"] Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.168879 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.169286 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.169485 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2zc2\" (UniqueName: \"kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.271336 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.271549 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.271595 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2zc2\" (UniqueName: \"kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.272418 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.273025 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.305647 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2zc2\" (UniqueName: \"kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.410599 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.636104 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2"] Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.886699 4664 generic.go:334] "Generic (PLEG): container finished" podID="3821c37c-46e6-432f-9787-b6f422897dde" containerID="328f06b10e4e55e481b39b7921f0180fc59f648ee76539579ee9603fa8abdbce" exitCode=0 Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.886808 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" event={"ID":"3821c37c-46e6-432f-9787-b6f422897dde","Type":"ContainerDied","Data":"328f06b10e4e55e481b39b7921f0180fc59f648ee76539579ee9603fa8abdbce"} Oct 13 06:58:00 crc kubenswrapper[4664]: I1013 06:58:00.887226 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" event={"ID":"3821c37c-46e6-432f-9787-b6f422897dde","Type":"ContainerStarted","Data":"6197a8b8cd419554c80495c26507209ed65e68229663595e09edeab44cf14864"} Oct 13 06:58:01 crc kubenswrapper[4664]: I1013 06:58:01.060236 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb673675-43af-441e-9cf3-f5f283ef9558" path="/var/lib/kubelet/pods/fb673675-43af-441e-9cf3-f5f283ef9558/volumes" Oct 13 06:58:02 crc kubenswrapper[4664]: I1013 06:58:02.906326 4664 generic.go:334] "Generic (PLEG): container finished" podID="3821c37c-46e6-432f-9787-b6f422897dde" containerID="d9ed46c6e88477f9a0f60f822d5face60b95aab1111803a588b93b83408e8675" exitCode=0 Oct 13 06:58:02 crc kubenswrapper[4664]: I1013 06:58:02.906408 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" event={"ID":"3821c37c-46e6-432f-9787-b6f422897dde","Type":"ContainerDied","Data":"d9ed46c6e88477f9a0f60f822d5face60b95aab1111803a588b93b83408e8675"} Oct 13 06:58:03 crc kubenswrapper[4664]: I1013 06:58:03.932303 
4664 generic.go:334] "Generic (PLEG): container finished" podID="3821c37c-46e6-432f-9787-b6f422897dde" containerID="e2bb0066a9d6333cedc9fa7593aac61e2750e3d843f8c4965859f4b821ba6fec" exitCode=0 Oct 13 06:58:03 crc kubenswrapper[4664]: I1013 06:58:03.932396 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" event={"ID":"3821c37c-46e6-432f-9787-b6f422897dde","Type":"ContainerDied","Data":"e2bb0066a9d6333cedc9fa7593aac61e2750e3d843f8c4965859f4b821ba6fec"} Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.218846 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.355258 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2zc2\" (UniqueName: \"kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2\") pod \"3821c37c-46e6-432f-9787-b6f422897dde\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.355858 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util\") pod \"3821c37c-46e6-432f-9787-b6f422897dde\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.355953 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle\") pod \"3821c37c-46e6-432f-9787-b6f422897dde\" (UID: \"3821c37c-46e6-432f-9787-b6f422897dde\") " Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.357954 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle" (OuterVolumeSpecName: "bundle") pod "3821c37c-46e6-432f-9787-b6f422897dde" (UID: "3821c37c-46e6-432f-9787-b6f422897dde"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.365009 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2" (OuterVolumeSpecName: "kube-api-access-m2zc2") pod "3821c37c-46e6-432f-9787-b6f422897dde" (UID: "3821c37c-46e6-432f-9787-b6f422897dde"). InnerVolumeSpecName "kube-api-access-m2zc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.390351 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util" (OuterVolumeSpecName: "util") pod "3821c37c-46e6-432f-9787-b6f422897dde" (UID: "3821c37c-46e6-432f-9787-b6f422897dde"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.458505 4664 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.458563 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2zc2\" (UniqueName: \"kubernetes.io/projected/3821c37c-46e6-432f-9787-b6f422897dde-kube-api-access-m2zc2\") on node \"crc\" DevicePath \"\"" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.458585 4664 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3821c37c-46e6-432f-9787-b6f422897dde-util\") on node \"crc\" DevicePath \"\"" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.951275 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" event={"ID":"3821c37c-46e6-432f-9787-b6f422897dde","Type":"ContainerDied","Data":"6197a8b8cd419554c80495c26507209ed65e68229663595e09edeab44cf14864"} Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.951329 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6197a8b8cd419554c80495c26507209ed65e68229663595e09edeab44cf14864" Oct 13 06:58:05 crc kubenswrapper[4664]: I1013 06:58:05.951467 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.740455 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk"] Oct 13 06:58:15 crc kubenswrapper[4664]: E1013 06:58:15.741319 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="pull" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.741332 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="pull" Oct 13 06:58:15 crc kubenswrapper[4664]: E1013 06:58:15.741342 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="util" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.741348 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="util" Oct 13 06:58:15 crc kubenswrapper[4664]: E1013 06:58:15.741360 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="extract" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.741366 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="extract" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.741482 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3821c37c-46e6-432f-9787-b6f422897dde" containerName="extract" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.741910 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.745159 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.745320 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.745431 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-swfst" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.745546 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.745682 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.760841 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk"] Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.903393 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-apiservice-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.903464 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t7v6\" (UniqueName: \"kubernetes.io/projected/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-kube-api-access-8t7v6\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:15 crc kubenswrapper[4664]: I1013 06:58:15.903489 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-webhook-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.005202 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-apiservice-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.006136 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t7v6\" (UniqueName: \"kubernetes.io/projected/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-kube-api-access-8t7v6\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.006229 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-webhook-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.021088 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-apiservice-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.021150 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-webhook-cert\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.030367 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t7v6\" (UniqueName: \"kubernetes.io/projected/c5b636b9-85bf-4ebc-8fea-04f2bc895d6a-kube-api-access-8t7v6\") pod \"metallb-operator-controller-manager-85599f4f6-7xvrk\" (UID: \"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a\") " pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.058391 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.086260 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh"] Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.087058 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.091471 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-bqr86" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.092238 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.096456 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.107030 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqlh5\" (UniqueName: \"kubernetes.io/projected/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-kube-api-access-sqlh5\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.107076 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-webhook-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.107168 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-apiservice-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.140583 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh"] Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.237317 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-apiservice-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.237390 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqlh5\" (UniqueName: \"kubernetes.io/projected/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-kube-api-access-sqlh5\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.237419 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-webhook-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 
06:58:16.257163 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-apiservice-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.271346 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-webhook-cert\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.275585 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqlh5\" (UniqueName: \"kubernetes.io/projected/0daca0ae-d791-4eca-bdc5-8e5f598b8d85-kube-api-access-sqlh5\") pod \"metallb-operator-webhook-server-594d989fdd-xrzvh\" (UID: \"0daca0ae-d791-4eca-bdc5-8e5f598b8d85\") " pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.417696 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk"] Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.455865 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:16 crc kubenswrapper[4664]: I1013 06:58:16.698493 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh"] Oct 13 06:58:16 crc kubenswrapper[4664]: W1013 06:58:16.707002 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0daca0ae_d791_4eca_bdc5_8e5f598b8d85.slice/crio-42d34c6cbeb59619b1d51752b134e0836aa57294d67298c265cdaef535e26d62 WatchSource:0}: Error finding container 42d34c6cbeb59619b1d51752b134e0836aa57294d67298c265cdaef535e26d62: Status 404 returned error can't find the container with id 42d34c6cbeb59619b1d51752b134e0836aa57294d67298c265cdaef535e26d62 Oct 13 06:58:17 crc kubenswrapper[4664]: I1013 06:58:17.032173 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" event={"ID":"0daca0ae-d791-4eca-bdc5-8e5f598b8d85","Type":"ContainerStarted","Data":"42d34c6cbeb59619b1d51752b134e0836aa57294d67298c265cdaef535e26d62"} Oct 13 06:58:17 crc kubenswrapper[4664]: I1013 06:58:17.032994 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" event={"ID":"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a","Type":"ContainerStarted","Data":"363e84656d1a3a847bb84a425883e295a7a4cad545b690b28b4ee588b06082af"} Oct 13 06:58:23 crc kubenswrapper[4664]: I1013 06:58:23.083290 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" event={"ID":"0daca0ae-d791-4eca-bdc5-8e5f598b8d85","Type":"ContainerStarted","Data":"855894c19e5600eaa4f4f973cdecb4cb7d71e9f941dd3c4ea61c15110e6757f5"} Oct 13 06:58:23 crc kubenswrapper[4664]: I1013 06:58:23.084113 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:23 crc kubenswrapper[4664]: I1013 06:58:23.086400 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" event={"ID":"c5b636b9-85bf-4ebc-8fea-04f2bc895d6a","Type":"ContainerStarted","Data":"5cc9040b2370dfdcb34f0d23707b41b0def65ff149b0808f4042ee738fa31fe7"} Oct 13 06:58:23 crc kubenswrapper[4664]: I1013 06:58:23.086481 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:23 crc kubenswrapper[4664]: I1013 06:58:23.126484 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podStartSLOduration=1.674383805 podStartE2EDuration="7.12646433s" podCreationTimestamp="2025-10-13 06:58:16 +0000 UTC" firstStartedPulling="2025-10-13 06:58:16.710581974 +0000 UTC m=+704.398027166" lastFinishedPulling="2025-10-13 06:58:22.162662499 +0000 UTC m=+709.850107691" observedRunningTime="2025-10-13 06:58:23.123656907 +0000 UTC m=+710.811102109" watchObservedRunningTime="2025-10-13 06:58:23.12646433 +0000 UTC m=+710.813909522" Oct 13 06:58:36 crc kubenswrapper[4664]: I1013 06:58:36.463157 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 06:58:36 crc kubenswrapper[4664]: I1013 06:58:36.498137 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" podStartSLOduration=15.770257542 podStartE2EDuration="21.498116622s" podCreationTimestamp="2025-10-13 06:58:15 +0000 UTC" firstStartedPulling="2025-10-13 06:58:16.426376239 +0000 UTC m=+704.113821431" lastFinishedPulling="2025-10-13 06:58:22.154235309 +0000 UTC m=+709.841680511" observedRunningTime="2025-10-13 06:58:23.150571591 +0000 UTC m=+710.838016783" watchObservedRunningTime="2025-10-13 06:58:36.498116622 +0000 UTC m=+724.185561814" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.080110 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.867303 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-7pjp7"] Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.870760 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873384 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmzsj\" (UniqueName: \"kubernetes.io/projected/9037bcaf-a327-4e60-acf8-978687eb88e9-kube-api-access-hmzsj\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873423 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-conf\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873461 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873522 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-reloader\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873610 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-sockets\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873651 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.873702 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-startup\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.878392 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.878524 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-89kt8" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.878581 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.907763 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs"] Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.908410 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.916758 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.922841 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs"] Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974640 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmzsj\" (UniqueName: \"kubernetes.io/projected/9037bcaf-a327-4e60-acf8-978687eb88e9-kube-api-access-hmzsj\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974684 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-conf\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974711 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974741 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-reloader\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974762 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-cert\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974786 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf2sk\" (UniqueName: \"kubernetes.io/projected/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-kube-api-access-cf2sk\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974850 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-sockets\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974882 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.974900 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-startup\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.975092 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-conf\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.975167 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-reloader\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: E1013 06:58:56.975253 4664 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Oct 13 06:58:56 crc kubenswrapper[4664]: E1013 06:58:56.975306 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs podName:9037bcaf-a327-4e60-acf8-978687eb88e9 nodeName:}" failed. No retries permitted until 2025-10-13 06:58:57.475287371 +0000 UTC m=+745.162732563 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs") pod "frr-k8s-7pjp7" (UID: "9037bcaf-a327-4e60-acf8-978687eb88e9") : secret "frr-k8s-certs-secret" not found Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.975386 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.975674 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-sockets\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:56 crc kubenswrapper[4664]: I1013 06:58:56.975825 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9037bcaf-a327-4e60-acf8-978687eb88e9-frr-startup\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.003954 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmzsj\" (UniqueName: \"kubernetes.io/projected/9037bcaf-a327-4e60-acf8-978687eb88e9-kube-api-access-hmzsj\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.010638 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-sngcf"] Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.011615 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.013553 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-d7q4s" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.019927 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.020083 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.020089 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.024599 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-68d546b9d8-fhr4v"] Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.025485 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.026887 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.039953 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-fhr4v"] Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.075662 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhdp8\" (UniqueName: \"kubernetes.io/projected/84ac3e13-afa3-4136-ba43-738b66c8e84e-kube-api-access-dhdp8\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.075709 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-cert\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.075742 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whcfz\" (UniqueName: \"kubernetes.io/projected/54a84985-f353-4f50-aec1-d7b5501c1d2c-kube-api-access-whcfz\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.075885 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-metrics-certs\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.075975 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54a84985-f353-4f50-aec1-d7b5501c1d2c-metallb-excludel2\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.076018 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-cert\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.076077 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf2sk\" (UniqueName: \"kubernetes.io/projected/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-kube-api-access-cf2sk\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.076118 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.076185 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-metrics-certs\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.080352 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-cert\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.100138 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf2sk\" (UniqueName: \"kubernetes.io/projected/cc585f12-7a56-4e1e-a5a1-4a9ccedd4309-kube-api-access-cf2sk\") pod \"frr-k8s-webhook-server-64bf5d555-nf4cs\" (UID: \"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.177496 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-metrics-certs\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.177768 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhdp8\" (UniqueName: \"kubernetes.io/projected/84ac3e13-afa3-4136-ba43-738b66c8e84e-kube-api-access-dhdp8\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.177920 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-cert\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.178006 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-whcfz\" (UniqueName: \"kubernetes.io/projected/54a84985-f353-4f50-aec1-d7b5501c1d2c-kube-api-access-whcfz\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.178098 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-metrics-certs\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.178185 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54a84985-f353-4f50-aec1-d7b5501c1d2c-metallb-excludel2\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.178294 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: E1013 06:58:57.178430 4664 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 13 06:58:57 crc kubenswrapper[4664]: E1013 06:58:57.178520 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist podName:54a84985-f353-4f50-aec1-d7b5501c1d2c nodeName:}" failed. No retries permitted until 2025-10-13 06:58:57.678506609 +0000 UTC m=+745.365951801 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist") pod "speaker-sngcf" (UID: "54a84985-f353-4f50-aec1-d7b5501c1d2c") : secret "metallb-memberlist" not found Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.178952 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54a84985-f353-4f50-aec1-d7b5501c1d2c-metallb-excludel2\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.180316 4664 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.182595 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-metrics-certs\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.184209 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-metrics-certs\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.193228 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84ac3e13-afa3-4136-ba43-738b66c8e84e-cert\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.195918 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhdp8\" (UniqueName: \"kubernetes.io/projected/84ac3e13-afa3-4136-ba43-738b66c8e84e-kube-api-access-dhdp8\") pod \"controller-68d546b9d8-fhr4v\" (UID: \"84ac3e13-afa3-4136-ba43-738b66c8e84e\") " pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.197225 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whcfz\" (UniqueName: \"kubernetes.io/projected/54a84985-f353-4f50-aec1-d7b5501c1d2c-kube-api-access-whcfz\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.229422 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.350030 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.481507 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.488463 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9037bcaf-a327-4e60-acf8-978687eb88e9-metrics-certs\") pod \"frr-k8s-7pjp7\" (UID: \"9037bcaf-a327-4e60-acf8-978687eb88e9\") " pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.581266 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-fhr4v"] Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.666302 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs"] Oct 13 06:58:57 crc kubenswrapper[4664]: W1013 06:58:57.678433 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc585f12_7a56_4e1e_a5a1_4a9ccedd4309.slice/crio-6796926f4771b6aaefbc45df25514c7f10071d476bf4098566b2cd65f3c3e343 WatchSource:0}: Error finding container 6796926f4771b6aaefbc45df25514c7f10071d476bf4098566b2cd65f3c3e343: Status 404 returned error can't find the container with id 6796926f4771b6aaefbc45df25514c7f10071d476bf4098566b2cd65f3c3e343 Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.683902 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:57 crc kubenswrapper[4664]: E1013 06:58:57.684106 4664 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 13 06:58:57 crc kubenswrapper[4664]: E1013 06:58:57.684189 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist podName:54a84985-f353-4f50-aec1-d7b5501c1d2c nodeName:}" failed. No retries permitted until 2025-10-13 06:58:58.684168171 +0000 UTC m=+746.371613363 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist") pod "speaker-sngcf" (UID: "54a84985-f353-4f50-aec1-d7b5501c1d2c") : secret "metallb-memberlist" not found Oct 13 06:58:57 crc kubenswrapper[4664]: I1013 06:58:57.788578 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.468450 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" event={"ID":"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309","Type":"ContainerStarted","Data":"6796926f4771b6aaefbc45df25514c7f10071d476bf4098566b2cd65f3c3e343"} Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.469785 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"e132ecd7bba249fc74da1962a262b3e01283e6047dc99abb5d1f0656bf621853"} Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.472284 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-fhr4v" event={"ID":"84ac3e13-afa3-4136-ba43-738b66c8e84e","Type":"ContainerStarted","Data":"e74aa088c51cc86f082c4c8bdbeed22b159c80f4adfcbe75e6225e44aac48445"} Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.472324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-fhr4v" event={"ID":"84ac3e13-afa3-4136-ba43-738b66c8e84e","Type":"ContainerStarted","Data":"c6892a1a2ed92dcb524057d34f37b78991fe2d6d57041a1cb33304f877355904"} Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.472339 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-fhr4v" event={"ID":"84ac3e13-afa3-4136-ba43-738b66c8e84e","Type":"ContainerStarted","Data":"3b6d3e81ab13edc85bc8d68b66f22c410d4327815e0ba8096b24544aaf317520"} Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.472495 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.740511 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.749320 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54a84985-f353-4f50-aec1-d7b5501c1d2c-memberlist\") pod \"speaker-sngcf\" (UID: \"54a84985-f353-4f50-aec1-d7b5501c1d2c\") " pod="metallb-system/speaker-sngcf" Oct 13 06:58:58 crc kubenswrapper[4664]: I1013 06:58:58.840247 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-sngcf" Oct 13 06:58:59 crc kubenswrapper[4664]: I1013 06:58:59.482484 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sngcf" event={"ID":"54a84985-f353-4f50-aec1-d7b5501c1d2c","Type":"ContainerStarted","Data":"58fa53e01c21928fa21c2ac0c8b9cb13b72feb68c5a50198f9377fd2c0bf6f37"} Oct 13 06:58:59 crc kubenswrapper[4664]: I1013 06:58:59.482857 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sngcf" event={"ID":"54a84985-f353-4f50-aec1-d7b5501c1d2c","Type":"ContainerStarted","Data":"507342871b503480cddae8418b589208ef5ce2ed6f451e5a922cc0eeb95fc5a4"} Oct 13 06:59:00 crc kubenswrapper[4664]: I1013 06:59:00.536590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sngcf" event={"ID":"54a84985-f353-4f50-aec1-d7b5501c1d2c","Type":"ContainerStarted","Data":"95ec7cde50d51262d9cc26a677e0c53103850ce068c2f582bb265bbad3d1f10f"} Oct 13 06:59:00 crc kubenswrapper[4664]: I1013 06:59:00.537854 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-sngcf" Oct 13 06:59:00 crc kubenswrapper[4664]: I1013 06:59:00.576196 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-68d546b9d8-fhr4v" podStartSLOduration=4.576180666 podStartE2EDuration="4.576180666s" podCreationTimestamp="2025-10-13 06:58:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:58:58.493268723 +0000 UTC m=+746.180713955" watchObservedRunningTime="2025-10-13 06:59:00.576180666 +0000 UTC m=+748.263625858" Oct 13 06:59:03 crc kubenswrapper[4664]: I1013 06:59:03.068895 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-sngcf" podStartSLOduration=7.068863165 podStartE2EDuration="7.068863165s" podCreationTimestamp="2025-10-13 06:58:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:59:00.578899648 +0000 UTC m=+748.266344840" watchObservedRunningTime="2025-10-13 06:59:03.068863165 +0000 UTC m=+750.756308357" Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.284604 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"] Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.284831 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" containerID="cri-o://95f8386c83ee41f76f1e1264d05f41a4ba3da19f7c4a427be4326855e1a96698" gracePeriod=30 Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.392516 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"] Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.392842 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" podUID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" containerName="route-controller-manager" containerID="cri-o://45ba609a01840eb747ef8dec94d95b42a4ccc68b173c07f4471e9766f00bc397" gracePeriod=30 Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.564727 4664 generic.go:334] 
"Generic (PLEG): container finished" podID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" containerID="45ba609a01840eb747ef8dec94d95b42a4ccc68b173c07f4471e9766f00bc397" exitCode=0 Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.564952 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" event={"ID":"c43c15db-46da-4cc0-b0fe-0ffaee273be6","Type":"ContainerDied","Data":"45ba609a01840eb747ef8dec94d95b42a4ccc68b173c07f4471e9766f00bc397"} Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.567262 4664 generic.go:334] "Generic (PLEG): container finished" podID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerID="95f8386c83ee41f76f1e1264d05f41a4ba3da19f7c4a427be4326855e1a96698" exitCode=0 Oct 13 06:59:04 crc kubenswrapper[4664]: I1013 06:59:04.567310 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" event={"ID":"79847ce1-e701-447b-b9d1-a0609b0b09ab","Type":"ContainerDied","Data":"95f8386c83ee41f76f1e1264d05f41a4ba3da19f7c4a427be4326855e1a96698"} Oct 13 06:59:05 crc kubenswrapper[4664]: I1013 06:59:05.411415 4664 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-f66qq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Oct 13 06:59:05 crc kubenswrapper[4664]: I1013 06:59:05.411467 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.068957 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.079588 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.122570 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9"] Oct 13 06:59:07 crc kubenswrapper[4664]: E1013 06:59:07.122848 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.122866 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: E1013 06:59:07.122889 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" containerName="route-controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.122898 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" containerName="route-controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.123028 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" containerName="route-controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.123058 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" containerName="controller-manager" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.123505 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.125784 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9"] Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265140 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca\") pod \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265196 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") pod \"79847ce1-e701-447b-b9d1-a0609b0b09ab\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265250 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca\") pod \"79847ce1-e701-447b-b9d1-a0609b0b09ab\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert\") pod \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265325 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") pod \"79847ce1-e701-447b-b9d1-a0609b0b09ab\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265377 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26jqd\" (UniqueName: \"kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd\") pod \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265403 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") pod \"79847ce1-e701-447b-b9d1-a0609b0b09ab\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265443 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config\") pod \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\" (UID: \"c43c15db-46da-4cc0-b0fe-0ffaee273be6\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265681 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config\") pod \"79847ce1-e701-447b-b9d1-a0609b0b09ab\" (UID: \"79847ce1-e701-447b-b9d1-a0609b0b09ab\") " Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265866 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c44ac49-ba1b-4708-bc11-d98ae6be1973-serving-cert\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265913 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-config\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265954 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz2gq\" (UniqueName: \"kubernetes.io/projected/5c44ac49-ba1b-4708-bc11-d98ae6be1973-kube-api-access-pz2gq\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.265992 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-client-ca\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.266206 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca" (OuterVolumeSpecName: "client-ca") pod "c43c15db-46da-4cc0-b0fe-0ffaee273be6" (UID: "c43c15db-46da-4cc0-b0fe-0ffaee273be6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.266326 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "79847ce1-e701-447b-b9d1-a0609b0b09ab" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.266699 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config" (OuterVolumeSpecName: "config") pod "c43c15db-46da-4cc0-b0fe-0ffaee273be6" (UID: "c43c15db-46da-4cc0-b0fe-0ffaee273be6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.266743 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config" (OuterVolumeSpecName: "config") pod "79847ce1-e701-447b-b9d1-a0609b0b09ab" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.267379 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca" (OuterVolumeSpecName: "client-ca") pod "79847ce1-e701-447b-b9d1-a0609b0b09ab" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.271282 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd" (OuterVolumeSpecName: "kube-api-access-26jqd") pod "c43c15db-46da-4cc0-b0fe-0ffaee273be6" (UID: "c43c15db-46da-4cc0-b0fe-0ffaee273be6"). InnerVolumeSpecName "kube-api-access-26jqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.271317 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c43c15db-46da-4cc0-b0fe-0ffaee273be6" (UID: "c43c15db-46da-4cc0-b0fe-0ffaee273be6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.271335 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh" (OuterVolumeSpecName: "kube-api-access-qljbh") pod "79847ce1-e701-447b-b9d1-a0609b0b09ab" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab"). InnerVolumeSpecName "kube-api-access-qljbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.275066 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "79847ce1-e701-447b-b9d1-a0609b0b09ab" (UID: "79847ce1-e701-447b-b9d1-a0609b0b09ab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.354551 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-68d546b9d8-fhr4v" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.367491 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c44ac49-ba1b-4708-bc11-d98ae6be1973-serving-cert\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.367577 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-config\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.368985 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-config\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369108 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz2gq\" (UniqueName: \"kubernetes.io/projected/5c44ac49-ba1b-4708-bc11-d98ae6be1973-kube-api-access-pz2gq\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369142 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-client-ca\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369205 4664 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-client-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369221 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43c15db-46da-4cc0-b0fe-0ffaee273be6-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369233 4664 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/79847ce1-e701-447b-b9d1-a0609b0b09ab-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369246 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26jqd\" (UniqueName: \"kubernetes.io/projected/c43c15db-46da-4cc0-b0fe-0ffaee273be6-kube-api-access-26jqd\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369257 4664 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369268 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369279 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79847ce1-e701-447b-b9d1-a0609b0b09ab-config\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369290 4664 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c43c15db-46da-4cc0-b0fe-0ffaee273be6-client-ca\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369302 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qljbh\" (UniqueName: \"kubernetes.io/projected/79847ce1-e701-447b-b9d1-a0609b0b09ab-kube-api-access-qljbh\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.369999 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c44ac49-ba1b-4708-bc11-d98ae6be1973-client-ca\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.373134 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c44ac49-ba1b-4708-bc11-d98ae6be1973-serving-cert\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.418199 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz2gq\" (UniqueName: \"kubernetes.io/projected/5c44ac49-ba1b-4708-bc11-d98ae6be1973-kube-api-access-pz2gq\") pod \"route-controller-manager-794668dd4-b58l9\" (UID: \"5c44ac49-ba1b-4708-bc11-d98ae6be1973\") " pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.443640 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.597098 4664 generic.go:334] "Generic (PLEG): container finished" podID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerID="9e9127fdb9c5e135aedd1567a069d86f7f2b6bba1e2d4fdf9ae8e8545f731727" exitCode=0 Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.597291 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerDied","Data":"9e9127fdb9c5e135aedd1567a069d86f7f2b6bba1e2d4fdf9ae8e8545f731727"} Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.602767 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" event={"ID":"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309","Type":"ContainerStarted","Data":"a58694108f3bbd4cd37b70b695dbe4baec1759fed367bd158e11dab4ee85eef1"} Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.603228 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.606416 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" event={"ID":"c43c15db-46da-4cc0-b0fe-0ffaee273be6","Type":"ContainerDied","Data":"c82276f3cb1d1b97bb23ce35e792a312d53ee5c758a9ad326684bf87520c821d"} Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.606471 4664 scope.go:117] "RemoveContainer" containerID="45ba609a01840eb747ef8dec94d95b42a4ccc68b173c07f4471e9766f00bc397" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.606585 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.622460 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" event={"ID":"79847ce1-e701-447b-b9d1-a0609b0b09ab","Type":"ContainerDied","Data":"65a89cbad5c28685f2087e9391eba2f2639d32927fea892c13e276d95a208c97"} Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.622544 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-f66qq" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.660718 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podStartSLOduration=2.429979524 podStartE2EDuration="11.66069789s" podCreationTimestamp="2025-10-13 06:58:56 +0000 UTC" firstStartedPulling="2025-10-13 06:58:57.681716765 +0000 UTC m=+745.369161957" lastFinishedPulling="2025-10-13 06:59:06.912435131 +0000 UTC m=+754.599880323" observedRunningTime="2025-10-13 06:59:07.644585301 +0000 UTC m=+755.332030493" watchObservedRunningTime="2025-10-13 06:59:07.66069789 +0000 UTC m=+755.348143082" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.663412 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"] Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.667575 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6tklh"] Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.671104 4664 scope.go:117] "RemoveContainer" containerID="95f8386c83ee41f76f1e1264d05f41a4ba3da19f7c4a427be4326855e1a96698" Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.681675 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"] Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.694415 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-f66qq"] Oct 13 06:59:07 crc kubenswrapper[4664]: I1013 06:59:07.921452 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9"] Oct 13 06:59:08 crc kubenswrapper[4664]: I1013 06:59:08.632840 4664 generic.go:334] "Generic (PLEG): container finished" podID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerID="9e68df902edfee32d08dbe4de981392ba76475fc78a992db51db8fea46e4e5a9" exitCode=0 Oct 13 06:59:08 crc kubenswrapper[4664]: I1013 06:59:08.632957 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerDied","Data":"9e68df902edfee32d08dbe4de981392ba76475fc78a992db51db8fea46e4e5a9"} Oct 13 06:59:08 crc kubenswrapper[4664]: I1013 06:59:08.634839 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" event={"ID":"5c44ac49-ba1b-4708-bc11-d98ae6be1973","Type":"ContainerStarted","Data":"86ad2da63cb4bb3a19f24720fca38fae04d7c18ad2205409a68baa72dd2fa28c"} Oct 13 06:59:08 crc kubenswrapper[4664]: I1013 06:59:08.634877 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" event={"ID":"5c44ac49-ba1b-4708-bc11-d98ae6be1973","Type":"ContainerStarted","Data":"6616d086281b5e84cd85edb592a790a3c91f63b05dade71b6b7f5b47c1643292"} Oct 13 06:59:08 crc kubenswrapper[4664]: I1013 06:59:08.701646 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podStartSLOduration=4.701625694 podStartE2EDuration="4.701625694s" podCreationTimestamp="2025-10-13 06:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:59:08.699021614 +0000 UTC m=+756.386466816" watchObservedRunningTime="2025-10-13 06:59:08.701625694 +0000 UTC m=+756.389070886" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.054947 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79847ce1-e701-447b-b9d1-a0609b0b09ab" path="/var/lib/kubelet/pods/79847ce1-e701-447b-b9d1-a0609b0b09ab/volumes" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.055776 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c43c15db-46da-4cc0-b0fe-0ffaee273be6" path="/var/lib/kubelet/pods/c43c15db-46da-4cc0-b0fe-0ffaee273be6/volumes" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.476300 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-67d595f8b9-5ff2q"] Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.478078 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.480301 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.482088 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.484922 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.484990 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.485002 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.485298 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.498106 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.500761 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67d595f8b9-5ff2q"] Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.605196 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-config\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.605258 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9254a8-804c-462f-b06a-0016170cb46c-serving-cert\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.605285 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-client-ca\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.605518 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-proxy-ca-bundles\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.605615 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq4sk\" (UniqueName: \"kubernetes.io/projected/5c9254a8-804c-462f-b06a-0016170cb46c-kube-api-access-tq4sk\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.649110 4664 generic.go:334] "Generic (PLEG): container finished" podID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerID="11a3dd30970a70c399882520f614e87c50f7cbfa901e1129c7749b0ba95fa523" exitCode=0 Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.649193 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerDied","Data":"11a3dd30970a70c399882520f614e87c50f7cbfa901e1129c7749b0ba95fa523"} Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.649692 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.670238 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.706983 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-config\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.707058 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9254a8-804c-462f-b06a-0016170cb46c-serving-cert\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.707098 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-client-ca\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.707186 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-proxy-ca-bundles\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.707230 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq4sk\" (UniqueName: \"kubernetes.io/projected/5c9254a8-804c-462f-b06a-0016170cb46c-kube-api-access-tq4sk\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.708694 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-client-ca\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.708953 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-proxy-ca-bundles\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.709207 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c9254a8-804c-462f-b06a-0016170cb46c-config\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.717314 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9254a8-804c-462f-b06a-0016170cb46c-serving-cert\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.738237 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq4sk\" (UniqueName: \"kubernetes.io/projected/5c9254a8-804c-462f-b06a-0016170cb46c-kube-api-access-tq4sk\") pod \"controller-manager-67d595f8b9-5ff2q\" (UID: \"5c9254a8-804c-462f-b06a-0016170cb46c\") " pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:09 crc kubenswrapper[4664]: I1013 06:59:09.804144 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.315512 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67d595f8b9-5ff2q"] Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.663078 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"947b7ca48a95ada6db699579df21ac6ba2220b65d014ff0899c26c03d189b11f"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.664322 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"fb1a11a91e0d55bffb562d1825427be4ba18924aa432e0e0869fbcdebf383020"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.664397 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"87154d94753c330eb6056a3751fd3eab8f87aaa130d79f4cdc8c7df5029dd2bd"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.664452 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"afaacd9d78172fd3db8975ce43ec5762bb73e1075885515b31c0dbc560938ad9"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.664532 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"ad5d420848f09aeb8e3fd92496a4b4b0209c1d45dce76661242be0edd8a2ad69"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.665735 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" event={"ID":"5c9254a8-804c-462f-b06a-0016170cb46c","Type":"ContainerStarted","Data":"1a77fb8fc8046104fd1287258b7eb0cbc1809f9fb244d1b9663617926015678f"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.666237 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" event={"ID":"5c9254a8-804c-462f-b06a-0016170cb46c","Type":"ContainerStarted","Data":"65bedb662c3e3c125e524d5ad21997a4305af2fafc348f3c362aa186440aeb05"} Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.666331 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.673430 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" start-of-body= Oct 13 06:59:10 crc kubenswrapper[4664]: I1013 06:59:10.673653 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" Oct 13 06:59:11 crc kubenswrapper[4664]: I1013 06:59:11.437486 4664 
dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Oct 13 06:59:11 crc kubenswrapper[4664]: I1013 06:59:11.682713 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"815bc395e171ce16f38d3fa1ecc10acb60777085f9aee2e13f9a76c39b243b7e"} Oct 13 06:59:11 crc kubenswrapper[4664]: I1013 06:59:11.689705 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 06:59:11 crc kubenswrapper[4664]: I1013 06:59:11.714207 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-7pjp7" podStartSLOduration=6.724231436 podStartE2EDuration="15.714188833s" podCreationTimestamp="2025-10-13 06:58:56 +0000 UTC" firstStartedPulling="2025-10-13 06:58:57.912270572 +0000 UTC m=+745.599715764" lastFinishedPulling="2025-10-13 06:59:06.902227969 +0000 UTC m=+754.589673161" observedRunningTime="2025-10-13 06:59:11.714148932 +0000 UTC m=+759.401594124" watchObservedRunningTime="2025-10-13 06:59:11.714188833 +0000 UTC m=+759.401634045" Oct 13 06:59:11 crc kubenswrapper[4664]: I1013 06:59:11.715946 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podStartSLOduration=7.71593794 podStartE2EDuration="7.71593794s" podCreationTimestamp="2025-10-13 06:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 06:59:10.695162665 +0000 UTC m=+758.382607857" watchObservedRunningTime="2025-10-13 06:59:11.71593794 +0000 UTC m=+759.403383152" Oct 13 06:59:12 crc kubenswrapper[4664]: I1013 06:59:12.689511 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:59:12 crc kubenswrapper[4664]: I1013 06:59:12.789152 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:59:12 crc kubenswrapper[4664]: I1013 06:59:12.839526 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:59:17 crc kubenswrapper[4664]: I1013 06:59:17.238213 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 06:59:18 crc kubenswrapper[4664]: I1013 06:59:18.846368 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-sngcf" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.172615 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.173507 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.175772 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.176005 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.184148 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.205928 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhnpp\" (UniqueName: \"kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp\") pod \"openstack-operator-index-rgx9f\" (UID: \"794a4720-51f3-4bc2-97a1-13184f974794\") " pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.307586 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhnpp\" (UniqueName: \"kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp\") pod \"openstack-operator-index-rgx9f\" (UID: \"794a4720-51f3-4bc2-97a1-13184f974794\") " pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.324983 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhnpp\" (UniqueName: \"kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp\") pod \"openstack-operator-index-rgx9f\" (UID: \"794a4720-51f3-4bc2-97a1-13184f974794\") " pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.498634 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:22 crc kubenswrapper[4664]: I1013 06:59:22.951047 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:22 crc kubenswrapper[4664]: W1013 06:59:22.982195 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod794a4720_51f3_4bc2_97a1_13184f974794.slice/crio-9473ca3bc57152bb256552e9da06c664eceef71e2a74568ebeb6276ad5d8c9c6 WatchSource:0}: Error finding container 9473ca3bc57152bb256552e9da06c664eceef71e2a74568ebeb6276ad5d8c9c6: Status 404 returned error can't find the container with id 9473ca3bc57152bb256552e9da06c664eceef71e2a74568ebeb6276ad5d8c9c6 Oct 13 06:59:23 crc kubenswrapper[4664]: I1013 06:59:23.781724 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rgx9f" event={"ID":"794a4720-51f3-4bc2-97a1-13184f974794","Type":"ContainerStarted","Data":"9473ca3bc57152bb256552e9da06c664eceef71e2a74568ebeb6276ad5d8c9c6"} Oct 13 06:59:24 crc kubenswrapper[4664]: I1013 06:59:24.807942 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rgx9f" event={"ID":"794a4720-51f3-4bc2-97a1-13184f974794","Type":"ContainerStarted","Data":"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef"} Oct 13 06:59:24 crc kubenswrapper[4664]: I1013 06:59:24.839639 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-rgx9f" podStartSLOduration=2.177018367 podStartE2EDuration="2.839601973s" podCreationTimestamp="2025-10-13 06:59:22 +0000 UTC" firstStartedPulling="2025-10-13 06:59:22.985301714 +0000 UTC m=+770.672746916" lastFinishedPulling="2025-10-13 06:59:23.64788533 +0000 UTC m=+771.335330522" observedRunningTime="2025-10-13 06:59:24.830854679 +0000 UTC m=+772.518299961" watchObservedRunningTime="2025-10-13 06:59:24.839601973 +0000 UTC m=+772.527047205" Oct 13 06:59:25 crc kubenswrapper[4664]: I1013 06:59:25.346202 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:25 crc kubenswrapper[4664]: I1013 06:59:25.949225 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-bsghg"] Oct 13 06:59:25 crc kubenswrapper[4664]: I1013 06:59:25.950597 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:25 crc kubenswrapper[4664]: I1013 06:59:25.953466 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-pp5c4" Oct 13 06:59:25 crc kubenswrapper[4664]: I1013 06:59:25.971612 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgvnz\" (UniqueName: \"kubernetes.io/projected/82047d93-7f79-495b-9e1a-380994104bb0-kube-api-access-xgvnz\") pod \"openstack-operator-index-bsghg\" (UID: \"82047d93-7f79-495b-9e1a-380994104bb0\") " pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.014255 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bsghg"] Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.073673 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgvnz\" (UniqueName: \"kubernetes.io/projected/82047d93-7f79-495b-9e1a-380994104bb0-kube-api-access-xgvnz\") pod \"openstack-operator-index-bsghg\" (UID: \"82047d93-7f79-495b-9e1a-380994104bb0\") " pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.107871 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgvnz\" (UniqueName: \"kubernetes.io/projected/82047d93-7f79-495b-9e1a-380994104bb0-kube-api-access-xgvnz\") pod \"openstack-operator-index-bsghg\" (UID: \"82047d93-7f79-495b-9e1a-380994104bb0\") " pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.271413 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.794505 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bsghg"] Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.829408 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-rgx9f" podUID="794a4720-51f3-4bc2-97a1-13184f974794" containerName="registry-server" containerID="cri-o://e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef" gracePeriod=2 Oct 13 06:59:26 crc kubenswrapper[4664]: I1013 06:59:26.829566 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bsghg" event={"ID":"82047d93-7f79-495b-9e1a-380994104bb0","Type":"ContainerStarted","Data":"dcade11a9e717ac6f64b6ad28b24225c7c34da4808925f9913beb209b67c9f8d"} Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.412581 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.498689 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhnpp\" (UniqueName: \"kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp\") pod \"794a4720-51f3-4bc2-97a1-13184f974794\" (UID: \"794a4720-51f3-4bc2-97a1-13184f974794\") " Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.505187 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp" (OuterVolumeSpecName: "kube-api-access-vhnpp") pod "794a4720-51f3-4bc2-97a1-13184f974794" (UID: "794a4720-51f3-4bc2-97a1-13184f974794"). InnerVolumeSpecName "kube-api-access-vhnpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.600751 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhnpp\" (UniqueName: \"kubernetes.io/projected/794a4720-51f3-4bc2-97a1-13184f974794-kube-api-access-vhnpp\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.793894 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7pjp7" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.841590 4664 generic.go:334] "Generic (PLEG): container finished" podID="794a4720-51f3-4bc2-97a1-13184f974794" containerID="e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef" exitCode=0 Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.841660 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rgx9f" event={"ID":"794a4720-51f3-4bc2-97a1-13184f974794","Type":"ContainerDied","Data":"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef"} Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.841691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rgx9f" event={"ID":"794a4720-51f3-4bc2-97a1-13184f974794","Type":"ContainerDied","Data":"9473ca3bc57152bb256552e9da06c664eceef71e2a74568ebeb6276ad5d8c9c6"} Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.841712 4664 scope.go:117] "RemoveContainer" containerID="e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.841848 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rgx9f" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.849018 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bsghg" event={"ID":"82047d93-7f79-495b-9e1a-380994104bb0","Type":"ContainerStarted","Data":"4d871cadf8810bd0cf5bb91d64a83eb1c13c256a6552aa67e1a66c11d22a8a43"} Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.871832 4664 scope.go:117] "RemoveContainer" containerID="e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef" Oct 13 06:59:27 crc kubenswrapper[4664]: E1013 06:59:27.872833 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef\": container with ID starting with e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef not found: ID does not exist" containerID="e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.872914 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef"} err="failed to get container status \"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef\": rpc error: code = NotFound desc = could not find container \"e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef\": container with ID starting with e593ba8f585dbac9c9313f5191c79602e47f4a50f9fe8ec43844e99a80ed92ef not found: ID does not exist" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.892323 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-bsghg" podStartSLOduration=2.459875222 podStartE2EDuration="2.892163488s" podCreationTimestamp="2025-10-13 06:59:25 +0000 UTC" firstStartedPulling="2025-10-13 06:59:26.81278008 +0000 UTC m=+774.500225312" lastFinishedPulling="2025-10-13 06:59:27.245068376 +0000 UTC m=+774.932513578" observedRunningTime="2025-10-13 06:59:27.880105286 +0000 UTC m=+775.567550488" watchObservedRunningTime="2025-10-13 06:59:27.892163488 +0000 UTC m=+775.579608690" Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.907955 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:27 crc kubenswrapper[4664]: I1013 06:59:27.908978 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-rgx9f"] Oct 13 06:59:28 crc kubenswrapper[4664]: I1013 06:59:28.812241 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:59:28 crc kubenswrapper[4664]: I1013 06:59:28.812335 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:59:29 crc kubenswrapper[4664]: I1013 06:59:29.061490 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="794a4720-51f3-4bc2-97a1-13184f974794" 
path="/var/lib/kubelet/pods/794a4720-51f3-4bc2-97a1-13184f974794/volumes" Oct 13 06:59:36 crc kubenswrapper[4664]: I1013 06:59:36.272903 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:36 crc kubenswrapper[4664]: I1013 06:59:36.273692 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:36 crc kubenswrapper[4664]: I1013 06:59:36.325972 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:36 crc kubenswrapper[4664]: I1013 06:59:36.975985 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-bsghg" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.730700 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn"] Oct 13 06:59:41 crc kubenswrapper[4664]: E1013 06:59:41.732921 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="794a4720-51f3-4bc2-97a1-13184f974794" containerName="registry-server" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.732946 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="794a4720-51f3-4bc2-97a1-13184f974794" containerName="registry-server" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.733228 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="794a4720-51f3-4bc2-97a1-13184f974794" containerName="registry-server" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.735278 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.740573 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-92jsw" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.754685 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn"] Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.922076 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmvsz\" (UniqueName: \"kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.922397 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:41 crc kubenswrapper[4664]: I1013 06:59:41.922565 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util\") pod 
\"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.024467 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.024573 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmvsz\" (UniqueName: \"kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.024647 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.025407 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.025474 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.049578 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmvsz\" (UniqueName: \"kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.091030 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.601592 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn"] Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.986784 4664 generic.go:334] "Generic (PLEG): container finished" podID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerID="05043759c0e3c01b26c225b8cdc01c49485f3d09068e42d1a3ebd55215ebe8b3" exitCode=0 Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.986890 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" event={"ID":"5b50ed9e-42f9-4a40-bdfb-61bc560256b5","Type":"ContainerDied","Data":"05043759c0e3c01b26c225b8cdc01c49485f3d09068e42d1a3ebd55215ebe8b3"} Oct 13 06:59:42 crc kubenswrapper[4664]: I1013 06:59:42.986987 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" event={"ID":"5b50ed9e-42f9-4a40-bdfb-61bc560256b5","Type":"ContainerStarted","Data":"8a725faf1669913ae925ca74ad47ce721e5feb3283531dcce5facc83e512cd79"} Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.003937 4664 generic.go:334] "Generic (PLEG): container finished" podID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerID="37b7ecef6e967900a1badafcd8c15099763dbc05956de865f1409481e605f5b4" exitCode=0 Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.004050 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" event={"ID":"5b50ed9e-42f9-4a40-bdfb-61bc560256b5","Type":"ContainerDied","Data":"37b7ecef6e967900a1badafcd8c15099763dbc05956de865f1409481e605f5b4"} Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.278390 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.279751 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.331496 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.475683 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.475977 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.476084 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcj49\" (UniqueName: \"kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.577044 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.577406 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.577573 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcj49\" (UniqueName: \"kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.577828 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.578045 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.626246 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bcj49\" (UniqueName: \"kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49\") pod \"certified-operators-2fpz5\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:44 crc kubenswrapper[4664]: I1013 06:59:44.912013 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:45 crc kubenswrapper[4664]: I1013 06:59:45.013445 4664 generic.go:334] "Generic (PLEG): container finished" podID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerID="47adae6f1e4d41127594aef1f5fd65f4f2c1c086c9448da59594994b89e41564" exitCode=0 Oct 13 06:59:45 crc kubenswrapper[4664]: I1013 06:59:45.013484 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" event={"ID":"5b50ed9e-42f9-4a40-bdfb-61bc560256b5","Type":"ContainerDied","Data":"47adae6f1e4d41127594aef1f5fd65f4f2c1c086c9448da59594994b89e41564"} Oct 13 06:59:45 crc kubenswrapper[4664]: I1013 06:59:45.391370 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.024865 4664 generic.go:334] "Generic (PLEG): container finished" podID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerID="d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20" exitCode=0 Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.024964 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerDied","Data":"d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20"} Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.026296 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerStarted","Data":"7f536eb16b7d11aab9ba07c85e454faabad513f8c2548522702d4536963dde21"} Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.301224 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.401120 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util\") pod \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.401203 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle\") pod \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.401231 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmvsz\" (UniqueName: \"kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz\") pod \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\" (UID: \"5b50ed9e-42f9-4a40-bdfb-61bc560256b5\") " Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.401937 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle" (OuterVolumeSpecName: "bundle") pod "5b50ed9e-42f9-4a40-bdfb-61bc560256b5" (UID: "5b50ed9e-42f9-4a40-bdfb-61bc560256b5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.406650 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz" (OuterVolumeSpecName: "kube-api-access-zmvsz") pod "5b50ed9e-42f9-4a40-bdfb-61bc560256b5" (UID: "5b50ed9e-42f9-4a40-bdfb-61bc560256b5"). InnerVolumeSpecName "kube-api-access-zmvsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.419105 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util" (OuterVolumeSpecName: "util") pod "5b50ed9e-42f9-4a40-bdfb-61bc560256b5" (UID: "5b50ed9e-42f9-4a40-bdfb-61bc560256b5"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.502714 4664 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-util\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.502773 4664 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:46 crc kubenswrapper[4664]: I1013 06:59:46.502837 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmvsz\" (UniqueName: \"kubernetes.io/projected/5b50ed9e-42f9-4a40-bdfb-61bc560256b5-kube-api-access-zmvsz\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:47 crc kubenswrapper[4664]: I1013 06:59:47.034039 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" event={"ID":"5b50ed9e-42f9-4a40-bdfb-61bc560256b5","Type":"ContainerDied","Data":"8a725faf1669913ae925ca74ad47ce721e5feb3283531dcce5facc83e512cd79"} Oct 13 06:59:47 crc kubenswrapper[4664]: I1013 06:59:47.034344 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a725faf1669913ae925ca74ad47ce721e5feb3283531dcce5facc83e512cd79" Oct 13 06:59:47 crc kubenswrapper[4664]: I1013 06:59:47.034091 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn" Oct 13 06:59:47 crc kubenswrapper[4664]: I1013 06:59:47.036278 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerStarted","Data":"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8"} Oct 13 06:59:48 crc kubenswrapper[4664]: I1013 06:59:48.061309 4664 generic.go:334] "Generic (PLEG): container finished" podID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerID="6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8" exitCode=0 Oct 13 06:59:48 crc kubenswrapper[4664]: I1013 06:59:48.061373 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerDied","Data":"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8"} Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.067245 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerStarted","Data":"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922"} Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.085161 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2fpz5" podStartSLOduration=2.629081622 podStartE2EDuration="5.085146676s" podCreationTimestamp="2025-10-13 06:59:44 +0000 UTC" firstStartedPulling="2025-10-13 06:59:46.027227606 +0000 UTC m=+793.714672838" lastFinishedPulling="2025-10-13 06:59:48.4832927 +0000 UTC m=+796.170737892" observedRunningTime="2025-10-13 06:59:49.082610378 +0000 UTC m=+796.770055570" watchObservedRunningTime="2025-10-13 06:59:49.085146676 +0000 UTC m=+796.772591868" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 
06:59:49.426533 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw"] Oct 13 06:59:49 crc kubenswrapper[4664]: E1013 06:59:49.426758 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="pull" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.426769 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="pull" Oct 13 06:59:49 crc kubenswrapper[4664]: E1013 06:59:49.426778 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="extract" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.426783 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="extract" Oct 13 06:59:49 crc kubenswrapper[4664]: E1013 06:59:49.426803 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="util" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.426810 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="util" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.426910 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b50ed9e-42f9-4a40-bdfb-61bc560256b5" containerName="extract" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.427457 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.438078 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-5wbxk" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.525979 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw"] Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.548365 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2hng\" (UniqueName: \"kubernetes.io/projected/844ed68c-a79c-4751-98b2-d0459d583d06-kube-api-access-b2hng\") pod \"openstack-operator-controller-operator-688d597459-5n8bw\" (UID: \"844ed68c-a79c-4751-98b2-d0459d583d06\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.649928 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2hng\" (UniqueName: \"kubernetes.io/projected/844ed68c-a79c-4751-98b2-d0459d583d06-kube-api-access-b2hng\") pod \"openstack-operator-controller-operator-688d597459-5n8bw\" (UID: \"844ed68c-a79c-4751-98b2-d0459d583d06\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:49 crc kubenswrapper[4664]: I1013 06:59:49.672289 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2hng\" (UniqueName: \"kubernetes.io/projected/844ed68c-a79c-4751-98b2-d0459d583d06-kube-api-access-b2hng\") pod \"openstack-operator-controller-operator-688d597459-5n8bw\" (UID: \"844ed68c-a79c-4751-98b2-d0459d583d06\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:49 crc 
kubenswrapper[4664]: I1013 06:59:49.742320 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:50 crc kubenswrapper[4664]: I1013 06:59:50.199121 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw"] Oct 13 06:59:50 crc kubenswrapper[4664]: W1013 06:59:50.220615 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod844ed68c_a79c_4751_98b2_d0459d583d06.slice/crio-8ad38454449eab665b0b02c88d5ce1ba6092a28c21e2e8feeb3b432b97f7a6ca WatchSource:0}: Error finding container 8ad38454449eab665b0b02c88d5ce1ba6092a28c21e2e8feeb3b432b97f7a6ca: Status 404 returned error can't find the container with id 8ad38454449eab665b0b02c88d5ce1ba6092a28c21e2e8feeb3b432b97f7a6ca Oct 13 06:59:51 crc kubenswrapper[4664]: I1013 06:59:51.079470 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" event={"ID":"844ed68c-a79c-4751-98b2-d0459d583d06","Type":"ContainerStarted","Data":"8ad38454449eab665b0b02c88d5ce1ba6092a28c21e2e8feeb3b432b97f7a6ca"} Oct 13 06:59:54 crc kubenswrapper[4664]: I1013 06:59:54.913319 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:54 crc kubenswrapper[4664]: I1013 06:59:54.914057 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:54 crc kubenswrapper[4664]: I1013 06:59:54.961228 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.109560 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" event={"ID":"844ed68c-a79c-4751-98b2-d0459d583d06","Type":"ContainerStarted","Data":"301071a3387263cf1dfc0dbdc4bfcbff0f6c958d3ce2656ab062ca7d60031481"} Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.161850 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.273723 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.275247 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.294701 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.457453 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.457524 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.457888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjcfr\" (UniqueName: \"kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.559296 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjcfr\" (UniqueName: \"kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.559385 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.559408 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.559837 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.560449 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.592970 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wjcfr\" (UniqueName: \"kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr\") pod \"community-operators-xgr78\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:55 crc kubenswrapper[4664]: I1013 06:59:55.602949 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgr78" Oct 13 06:59:56 crc kubenswrapper[4664]: I1013 06:59:56.847158 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 06:59:57 crc kubenswrapper[4664]: I1013 06:59:57.125888 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerStarted","Data":"795309f8be43a24f9c231209ce87299acfcda92e0f261db64094459f43f6554a"} Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.132998 4664 generic.go:334] "Generic (PLEG): container finished" podID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerID="536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262" exitCode=0 Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.133051 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerDied","Data":"536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262"} Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.136479 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" event={"ID":"844ed68c-a79c-4751-98b2-d0459d583d06","Type":"ContainerStarted","Data":"7b43406cd46cb4935f53700274b06ce1e00ca9805be5a7c4c86537fe6ca98ea7"} Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.136671 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.207499 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" podStartSLOduration=1.916941779 podStartE2EDuration="9.207484947s" podCreationTimestamp="2025-10-13 06:59:49 +0000 UTC" firstStartedPulling="2025-10-13 06:59:50.223042599 +0000 UTC m=+797.910487791" lastFinishedPulling="2025-10-13 06:59:57.513585767 +0000 UTC m=+805.201030959" observedRunningTime="2025-10-13 06:59:58.199094804 +0000 UTC m=+805.886540006" watchObservedRunningTime="2025-10-13 06:59:58.207484947 +0000 UTC m=+805.894930139" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.464335 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.465203 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2fpz5" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="registry-server" containerID="cri-o://c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922" gracePeriod=2 Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.812752 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.812816 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.823271 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.924152 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content\") pod \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.924238 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcj49\" (UniqueName: \"kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49\") pod \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.924286 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities\") pod \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\" (UID: \"41ffa5cf-477c-4a33-8409-18d2f40cecf3\") " Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.925280 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities" (OuterVolumeSpecName: "utilities") pod "41ffa5cf-477c-4a33-8409-18d2f40cecf3" (UID: "41ffa5cf-477c-4a33-8409-18d2f40cecf3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.936546 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49" (OuterVolumeSpecName: "kube-api-access-bcj49") pod "41ffa5cf-477c-4a33-8409-18d2f40cecf3" (UID: "41ffa5cf-477c-4a33-8409-18d2f40cecf3"). InnerVolumeSpecName "kube-api-access-bcj49". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 06:59:58 crc kubenswrapper[4664]: I1013 06:59:58.972765 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41ffa5cf-477c-4a33-8409-18d2f40cecf3" (UID: "41ffa5cf-477c-4a33-8409-18d2f40cecf3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.026096 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.026635 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcj49\" (UniqueName: \"kubernetes.io/projected/41ffa5cf-477c-4a33-8409-18d2f40cecf3-kube-api-access-bcj49\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.026650 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41ffa5cf-477c-4a33-8409-18d2f40cecf3-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.145930 4664 generic.go:334] "Generic (PLEG): container finished" podID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerID="3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d" exitCode=0 Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.146031 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerDied","Data":"3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d"} Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.152836 4664 generic.go:334] "Generic (PLEG): container finished" podID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerID="c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922" exitCode=0 Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.153505 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2fpz5" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.153627 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerDied","Data":"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922"} Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.153680 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2fpz5" event={"ID":"41ffa5cf-477c-4a33-8409-18d2f40cecf3","Type":"ContainerDied","Data":"7f536eb16b7d11aab9ba07c85e454faabad513f8c2548522702d4536963dde21"} Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.153698 4664 scope.go:117] "RemoveContainer" containerID="c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.164615 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.181129 4664 scope.go:117] "RemoveContainer" containerID="6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.192545 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.198953 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2fpz5"] Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.212121 4664 scope.go:117] "RemoveContainer" containerID="d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.238132 4664 scope.go:117] "RemoveContainer" containerID="c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922" Oct 13 06:59:59 crc kubenswrapper[4664]: E1013 06:59:59.238778 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922\": container with ID starting with c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922 not found: ID does not exist" containerID="c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.238876 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922"} err="failed to get container status \"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922\": rpc error: code = NotFound desc = could not find container \"c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922\": container with ID starting with c781173c9cb035f8cd97d436876567d6c2a7b8101d377616f37b83c1844ee922 not found: ID does not exist" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.238919 4664 scope.go:117] "RemoveContainer" containerID="6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8" Oct 13 06:59:59 crc kubenswrapper[4664]: E1013 06:59:59.239672 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8\": container with ID starting with 
6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8 not found: ID does not exist" containerID="6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.239695 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8"} err="failed to get container status \"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8\": rpc error: code = NotFound desc = could not find container \"6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8\": container with ID starting with 6825ed2fcbf5a3aa462af5eb130cfc656dc117ae0f52fe8a4850c99ae1fe55d8 not found: ID does not exist" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.239710 4664 scope.go:117] "RemoveContainer" containerID="d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20" Oct 13 06:59:59 crc kubenswrapper[4664]: E1013 06:59:59.240375 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20\": container with ID starting with d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20 not found: ID does not exist" containerID="d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20" Oct 13 06:59:59 crc kubenswrapper[4664]: I1013 06:59:59.240399 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20"} err="failed to get container status \"d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20\": rpc error: code = NotFound desc = could not find container \"d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20\": container with ID starting with d6a6dbfc9a657e25f59bb422a6144d71a20ecf5bb90d0142d4f7d4d0ed01fe20 not found: ID does not exist" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.142622 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5"] Oct 13 07:00:00 crc kubenswrapper[4664]: E1013 07:00:00.142839 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="extract-content" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.142851 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="extract-content" Oct 13 07:00:00 crc kubenswrapper[4664]: E1013 07:00:00.142863 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="registry-server" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.142868 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="registry-server" Oct 13 07:00:00 crc kubenswrapper[4664]: E1013 07:00:00.142880 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="extract-utilities" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.142886 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="extract-utilities" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.142997 4664 memory_manager.go:354] "RemoveStaleState removing state" 
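Each "ContainerStatus from runtime service failed ... code = NotFound" pair above is the kubelet asking CRI-O for the status of a container it has just removed; NotFound is the expected answer after a successful removal, which is why the kubelet logs the error and carries on. A small sketch, assuming the grpc-go module, of how a CRI client can classify that reply (isContainerNotFound is a hypothetical helper, not a kubelet function):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isContainerNotFound reports whether a CRI call failed because the
// container no longer exists, as in the NotFound replies logged above.
func isContainerNotFound(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	gone := status.Error(codes.NotFound, `could not find container "c781173c..."`) // abbreviated sample message
	fmt.Println(isContainerNotFound(gone))               // true: benign after RemoveContainer
	fmt.Println(isContainerNotFound(errors.New("boom"))) // false: plain errors map to codes.Unknown
}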
podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" containerName="registry-server" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.143350 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.145583 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.146525 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.160524 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerStarted","Data":"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f"} Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.165535 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5"] Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.239248 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xgr78" podStartSLOduration=3.789498016 podStartE2EDuration="5.239231481s" podCreationTimestamp="2025-10-13 06:59:55 +0000 UTC" firstStartedPulling="2025-10-13 06:59:58.135115385 +0000 UTC m=+805.822560577" lastFinishedPulling="2025-10-13 06:59:59.58484882 +0000 UTC m=+807.272294042" observedRunningTime="2025-10-13 07:00:00.237112845 +0000 UTC m=+807.924558047" watchObservedRunningTime="2025-10-13 07:00:00.239231481 +0000 UTC m=+807.926676673" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.245704 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjs4n\" (UniqueName: \"kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.245787 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.245850 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.347588 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.347700 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjs4n\" (UniqueName: \"kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.347741 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.349115 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.365726 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.366924 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjs4n\" (UniqueName: \"kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n\") pod \"collect-profiles-29338980-sgwn5\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.458309 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:00 crc kubenswrapper[4664]: I1013 07:00:00.932120 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5"] Oct 13 07:00:00 crc kubenswrapper[4664]: W1013 07:00:00.934728 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1dd88a20_51cc_4081_9d90_653d6867555d.slice/crio-7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe WatchSource:0}: Error finding container 7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe: Status 404 returned error can't find the container with id 7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.056015 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41ffa5cf-477c-4a33-8409-18d2f40cecf3" path="/var/lib/kubelet/pods/41ffa5cf-477c-4a33-8409-18d2f40cecf3/volumes" Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.172236 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" event={"ID":"1dd88a20-51cc-4081-9d90-653d6867555d","Type":"ContainerStarted","Data":"8a88c93bb84ca02f583213f0a0adc3b4f1497639e8034a8529b58427db54e400"} Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.173317 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" event={"ID":"1dd88a20-51cc-4081-9d90-653d6867555d","Type":"ContainerStarted","Data":"7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe"} Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.190074 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" podStartSLOduration=1.190055326 podStartE2EDuration="1.190055326s" podCreationTimestamp="2025-10-13 07:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:00:01.189228473 +0000 UTC m=+808.876673665" watchObservedRunningTime="2025-10-13 07:00:01.190055326 +0000 UTC m=+808.877500518" Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.873011 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.874443 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:01 crc kubenswrapper[4664]: I1013 07:00:01.884321 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.068981 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.069045 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdzgh\" (UniqueName: \"kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.069071 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.072738 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.074914 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.086577 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170250 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170321 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdzgh\" (UniqueName: \"kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170368 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170404 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities\") pod \"redhat-marketplace-c58kd\" (UID: 
\"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170624 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd6b2\" (UniqueName: \"kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170849 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.170953 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.171286 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.178142 4664 generic.go:334] "Generic (PLEG): container finished" podID="1dd88a20-51cc-4081-9d90-653d6867555d" containerID="8a88c93bb84ca02f583213f0a0adc3b4f1497639e8034a8529b58427db54e400" exitCode=0 Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.178192 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" event={"ID":"1dd88a20-51cc-4081-9d90-653d6867555d","Type":"ContainerDied","Data":"8a88c93bb84ca02f583213f0a0adc3b4f1497639e8034a8529b58427db54e400"} Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.207909 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdzgh\" (UniqueName: \"kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh\") pod \"redhat-marketplace-c58kd\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.273086 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.273235 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.273844 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.273963 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.274060 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd6b2\" (UniqueName: \"kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.320960 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd6b2\" (UniqueName: \"kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2\") pod \"redhat-operators-rc445\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.397690 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.488880 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.762485 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:02 crc kubenswrapper[4664]: I1013 07:00:02.876455 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:02 crc kubenswrapper[4664]: W1013 07:00:02.926933 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89af999e_7634_484d_8c93_8a9abd790204.slice/crio-1957b8dd8ff370f1f683e4cda3bcb97dd1266c90467412a8fef9333d8b848669 WatchSource:0}: Error finding container 1957b8dd8ff370f1f683e4cda3bcb97dd1266c90467412a8fef9333d8b848669: Status 404 returned error can't find the container with id 1957b8dd8ff370f1f683e4cda3bcb97dd1266c90467412a8fef9333d8b848669 Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.184442 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerStarted","Data":"1957b8dd8ff370f1f683e4cda3bcb97dd1266c90467412a8fef9333d8b848669"} Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.185543 4664 generic.go:334] "Generic (PLEG): container finished" podID="73874794-d495-48e3-9087-8c37dc2dd70c" containerID="1e38813547cb4d3edcb82c3d1c5e2388bd9bd5dc0dc591548b09cd44cf2a92ff" exitCode=0 Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.186211 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" 
event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerDied","Data":"1e38813547cb4d3edcb82c3d1c5e2388bd9bd5dc0dc591548b09cd44cf2a92ff"} Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.186231 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerStarted","Data":"bb029e5323ebe14504044ba386bad68a245ef287ddc3004b5b7f732eb09768ed"} Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.527148 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.622585 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume\") pod \"1dd88a20-51cc-4081-9d90-653d6867555d\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.622648 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjs4n\" (UniqueName: \"kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n\") pod \"1dd88a20-51cc-4081-9d90-653d6867555d\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.622685 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume\") pod \"1dd88a20-51cc-4081-9d90-653d6867555d\" (UID: \"1dd88a20-51cc-4081-9d90-653d6867555d\") " Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.623699 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume" (OuterVolumeSpecName: "config-volume") pod "1dd88a20-51cc-4081-9d90-653d6867555d" (UID: "1dd88a20-51cc-4081-9d90-653d6867555d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.631193 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1dd88a20-51cc-4081-9d90-653d6867555d" (UID: "1dd88a20-51cc-4081-9d90-653d6867555d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.632054 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n" (OuterVolumeSpecName: "kube-api-access-qjs4n") pod "1dd88a20-51cc-4081-9d90-653d6867555d" (UID: "1dd88a20-51cc-4081-9d90-653d6867555d"). InnerVolumeSpecName "kube-api-access-qjs4n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.724975 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1dd88a20-51cc-4081-9d90-653d6867555d-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.725028 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjs4n\" (UniqueName: \"kubernetes.io/projected/1dd88a20-51cc-4081-9d90-653d6867555d-kube-api-access-qjs4n\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:03 crc kubenswrapper[4664]: I1013 07:00:03.725041 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1dd88a20-51cc-4081-9d90-653d6867555d-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:04 crc kubenswrapper[4664]: I1013 07:00:04.195004 4664 generic.go:334] "Generic (PLEG): container finished" podID="89af999e-7634-484d-8c93-8a9abd790204" containerID="3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4" exitCode=0 Oct 13 07:00:04 crc kubenswrapper[4664]: I1013 07:00:04.195071 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerDied","Data":"3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4"} Oct 13 07:00:04 crc kubenswrapper[4664]: I1013 07:00:04.206024 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" event={"ID":"1dd88a20-51cc-4081-9d90-653d6867555d","Type":"ContainerDied","Data":"7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe"} Oct 13 07:00:04 crc kubenswrapper[4664]: I1013 07:00:04.206058 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c98901209fbb96e5299a6cfef5a4d255ffbf99b3e671d978df437fe61bc5afe" Oct 13 07:00:04 crc kubenswrapper[4664]: I1013 07:00:04.206115 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5" Oct 13 07:00:05 crc kubenswrapper[4664]: I1013 07:00:05.214021 4664 generic.go:334] "Generic (PLEG): container finished" podID="73874794-d495-48e3-9087-8c37dc2dd70c" containerID="0f2419776bf92ada04a229d8247084990e42b7cf0f79a8892762f12131dedfb9" exitCode=0 Oct 13 07:00:05 crc kubenswrapper[4664]: I1013 07:00:05.214098 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerDied","Data":"0f2419776bf92ada04a229d8247084990e42b7cf0f79a8892762f12131dedfb9"} Oct 13 07:00:05 crc kubenswrapper[4664]: I1013 07:00:05.603706 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:05 crc kubenswrapper[4664]: I1013 07:00:05.603986 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:05 crc kubenswrapper[4664]: I1013 07:00:05.735376 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:06 crc kubenswrapper[4664]: I1013 07:00:06.222524 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerStarted","Data":"85e635836363e4798c3da2e76ef85fcb93b84152ac0c00e282324b159bc91730"} Oct 13 07:00:06 crc kubenswrapper[4664]: I1013 07:00:06.225012 4664 generic.go:334] "Generic (PLEG): container finished" podID="89af999e-7634-484d-8c93-8a9abd790204" containerID="cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0" exitCode=0 Oct 13 07:00:06 crc kubenswrapper[4664]: I1013 07:00:06.225863 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerDied","Data":"cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0"} Oct 13 07:00:06 crc kubenswrapper[4664]: I1013 07:00:06.254751 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rc445" podStartSLOduration=1.8184953369999999 podStartE2EDuration="4.254735485s" podCreationTimestamp="2025-10-13 07:00:02 +0000 UTC" firstStartedPulling="2025-10-13 07:00:03.186920713 +0000 UTC m=+810.874365905" lastFinishedPulling="2025-10-13 07:00:05.623160861 +0000 UTC m=+813.310606053" observedRunningTime="2025-10-13 07:00:06.252224478 +0000 UTC m=+813.939669680" watchObservedRunningTime="2025-10-13 07:00:06.254735485 +0000 UTC m=+813.942180677" Oct 13 07:00:06 crc kubenswrapper[4664]: I1013 07:00:06.275596 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:07 crc kubenswrapper[4664]: I1013 07:00:07.232319 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerStarted","Data":"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3"} Oct 13 07:00:07 crc kubenswrapper[4664]: I1013 07:00:07.251549 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c58kd" podStartSLOduration=3.718712628 podStartE2EDuration="6.251534581s" 
podCreationTimestamp="2025-10-13 07:00:01 +0000 UTC" firstStartedPulling="2025-10-13 07:00:04.199093087 +0000 UTC m=+811.886538279" lastFinishedPulling="2025-10-13 07:00:06.73191504 +0000 UTC m=+814.419360232" observedRunningTime="2025-10-13 07:00:07.250348659 +0000 UTC m=+814.937793861" watchObservedRunningTime="2025-10-13 07:00:07.251534581 +0000 UTC m=+814.938979773" Oct 13 07:00:08 crc kubenswrapper[4664]: I1013 07:00:08.663879 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 07:00:08 crc kubenswrapper[4664]: I1013 07:00:08.664706 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xgr78" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="registry-server" containerID="cri-o://1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f" gracePeriod=2 Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.052366 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.198282 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjcfr\" (UniqueName: \"kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr\") pod \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.198619 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content\") pod \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.198769 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities\") pod \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\" (UID: \"3eee6100-0579-4e11-a6a3-cf5103ac0bd2\") " Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.199464 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities" (OuterVolumeSpecName: "utilities") pod "3eee6100-0579-4e11-a6a3-cf5103ac0bd2" (UID: "3eee6100-0579-4e11-a6a3-cf5103ac0bd2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.199690 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.212993 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr" (OuterVolumeSpecName: "kube-api-access-wjcfr") pod "3eee6100-0579-4e11-a6a3-cf5103ac0bd2" (UID: "3eee6100-0579-4e11-a6a3-cf5103ac0bd2"). InnerVolumeSpecName "kube-api-access-wjcfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.251263 4664 generic.go:334] "Generic (PLEG): container finished" podID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerID="1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f" exitCode=0 Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.251300 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerDied","Data":"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f"} Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.251325 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgr78" event={"ID":"3eee6100-0579-4e11-a6a3-cf5103ac0bd2","Type":"ContainerDied","Data":"795309f8be43a24f9c231209ce87299acfcda92e0f261db64094459f43f6554a"} Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.251342 4664 scope.go:117] "RemoveContainer" containerID="1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.251452 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgr78" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.274123 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3eee6100-0579-4e11-a6a3-cf5103ac0bd2" (UID: "3eee6100-0579-4e11-a6a3-cf5103ac0bd2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.280916 4664 scope.go:117] "RemoveContainer" containerID="3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.301156 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjcfr\" (UniqueName: \"kubernetes.io/projected/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-kube-api-access-wjcfr\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.301194 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eee6100-0579-4e11-a6a3-cf5103ac0bd2-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.302594 4664 scope.go:117] "RemoveContainer" containerID="536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.328816 4664 scope.go:117] "RemoveContainer" containerID="1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f" Oct 13 07:00:09 crc kubenswrapper[4664]: E1013 07:00:09.329419 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f\": container with ID starting with 1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f not found: ID does not exist" containerID="1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.329450 4664 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f"} err="failed to get container status \"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f\": rpc error: code = NotFound desc = could not find container \"1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f\": container with ID starting with 1eeef4d8350e775af8714b5ad4ac8c4239d9e236c0ded13a2b934efcf803f45f not found: ID does not exist" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.329471 4664 scope.go:117] "RemoveContainer" containerID="3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d" Oct 13 07:00:09 crc kubenswrapper[4664]: E1013 07:00:09.329761 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d\": container with ID starting with 3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d not found: ID does not exist" containerID="3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.329833 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d"} err="failed to get container status \"3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d\": rpc error: code = NotFound desc = could not find container \"3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d\": container with ID starting with 3121ae8d9e993286ca0ed12eb5358700d7ed8d3f92c772d3cfef9c60f7e0dd6d not found: ID does not exist" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.329852 4664 scope.go:117] "RemoveContainer" containerID="536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262" Oct 13 07:00:09 crc kubenswrapper[4664]: E1013 07:00:09.330132 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262\": container with ID starting with 536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262 not found: ID does not exist" containerID="536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.330165 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262"} err="failed to get container status \"536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262\": rpc error: code = NotFound desc = could not find container \"536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262\": container with ID starting with 536d307fcf1e9382360d8f5d3420057230c598ee6ae0fd058b34c2b9ed1be262 not found: ID does not exist" Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.577076 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 07:00:09 crc kubenswrapper[4664]: I1013 07:00:09.588167 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xgr78"] Oct 13 07:00:11 crc kubenswrapper[4664]: I1013 07:00:11.055229 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" path="/var/lib/kubelet/pods/3eee6100-0579-4e11-a6a3-cf5103ac0bd2/volumes" Oct 13 07:00:12 crc 
kubenswrapper[4664]: I1013 07:00:12.398344 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:12 crc kubenswrapper[4664]: I1013 07:00:12.398580 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:12 crc kubenswrapper[4664]: I1013 07:00:12.456345 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:12 crc kubenswrapper[4664]: I1013 07:00:12.489393 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:12 crc kubenswrapper[4664]: I1013 07:00:12.489452 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:12 crc kubenswrapper[4664]: I1013 07:00:12.533561 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:13 crc kubenswrapper[4664]: I1013 07:00:13.330157 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:13 crc kubenswrapper[4664]: I1013 07:00:13.332861 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:14 crc kubenswrapper[4664]: I1013 07:00:14.272190 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.290740 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c58kd" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="registry-server" containerID="cri-o://5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3" gracePeriod=2 Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.667872 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.771723 4664 util.go:48] "No ready sandbox for pod can be found. 
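The probe sequence above for redhat-operators-rc445 and redhat-marketplace-c58kd (startup "unhealthy", then startup "started", then readiness "ready", with readiness first logged as "" while still unknown) shows startup-probe gating: liveness and readiness probing is held back until the startup probe succeeds once. A toy trace of that gate (our simplification of the kubelet's prober state machine):

package main

import "fmt"

func main() {
	type update struct{ probe, status string }
	trace := []update{
		{"startup", "unhealthy"}, // first attempt fails; readiness still ""
		{"startup", "started"},   // startup probe passes; gate opens
		{"readiness", "ready"},   // readiness now probed and passing
	}
	started, ready := false, false
	for _, u := range trace {
		switch {
		case u.probe == "startup" && u.status == "started":
			started = true
		case u.probe == "readiness" && u.status == "ready" && started:
			ready = true
		}
		fmt.Printf("%-9s -> %-9s (started=%v ready=%v)\n", u.probe, u.status, started, ready)
	}
}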
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803467 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt"] Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803659 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803670 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803680 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="extract-utilities" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803686 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="extract-utilities" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803697 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="extract-content" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803703 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="extract-content" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803708 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803714 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803721 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="extract-content" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803728 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="extract-content" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803741 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dd88a20-51cc-4081-9d90-653d6867555d" containerName="collect-profiles" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803747 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dd88a20-51cc-4081-9d90-653d6867555d" containerName="collect-profiles" Oct 13 07:00:15 crc kubenswrapper[4664]: E1013 07:00:15.803755 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="extract-utilities" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.803760 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="extract-utilities" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.806993 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dd88a20-51cc-4081-9d90-653d6867555d" containerName="collect-profiles" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.807012 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="89af999e-7634-484d-8c93-8a9abd790204" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.807026 4664 
memory_manager.go:354] "RemoveStaleState removing state" podUID="3eee6100-0579-4e11-a6a3-cf5103ac0bd2" containerName="registry-server" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.807598 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.811232 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-f66nh" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.815735 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.816631 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.821457 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-j6nfw" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.829090 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.838403 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.849073 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.849182 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.852751 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-lqpbz" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.872533 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.873560 4664 util.go:30] "No sandbox for pod can be found. 
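Each "Caches populated for *v1.Secret from object-..." line above is the kubelet starting a dedicated reflector for one Secret that a newly admitted pod references (here the operators' dockercfg pull secrets), pinned to a single object rather than caching the whole namespace. A client-go sketch of such a single-object watch, assuming a kubeconfig at the default path (the namespace and secret name are copied from the log; this is an illustration, not the kubelet's exact code path):

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The field selector pins the watch to exactly one named object.
	w, err := client.CoreV1().Secrets("openstack-operators").Watch(context.Background(), metav1.ListOptions{
		FieldSelector: "metadata.name=barbican-operator-controller-manager-dockercfg-f66nh",
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println("event:", ev.Type) // ADDED, MODIFIED, DELETED, ...
	}
}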
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.888527 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6x748" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.890567 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdzgh\" (UniqueName: \"kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh\") pod \"89af999e-7634-484d-8c93-8a9abd790204\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.890910 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities\") pod \"89af999e-7634-484d-8c93-8a9abd790204\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.891064 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content\") pod \"89af999e-7634-484d-8c93-8a9abd790204\" (UID: \"89af999e-7634-484d-8c93-8a9abd790204\") " Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.895085 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.895696 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities" (OuterVolumeSpecName: "utilities") pod "89af999e-7634-484d-8c93-8a9abd790204" (UID: "89af999e-7634-484d-8c93-8a9abd790204"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.897710 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.898178 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh" (OuterVolumeSpecName: "kube-api-access-vdzgh") pod "89af999e-7634-484d-8c93-8a9abd790204" (UID: "89af999e-7634-484d-8c93-8a9abd790204"). InnerVolumeSpecName "kube-api-access-vdzgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.903884 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.904745 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.907921 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89af999e-7634-484d-8c93-8a9abd790204" (UID: "89af999e-7634-484d-8c93-8a9abd790204"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.908324 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-9nm56" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.925305 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.947047 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.948761 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.960189 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-w8wkg" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.985854 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994331 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g7sz\" (UniqueName: \"kubernetes.io/projected/9f6b6a3e-5706-49fa-aafa-49f68b19997e-kube-api-access-5g7sz\") pod \"barbican-operator-controller-manager-658bdf4b74-m4bpt\" (UID: \"9f6b6a3e-5706-49fa-aafa-49f68b19997e\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994394 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlzms\" (UniqueName: \"kubernetes.io/projected/3c368f1e-93f0-440a-ad95-d205dd78e4b2-kube-api-access-nlzms\") pod \"cinder-operator-controller-manager-7b7fb68549-zptb2\" (UID: \"3c368f1e-93f0-440a-ad95-d205dd78e4b2\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994450 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzqxb\" (UniqueName: \"kubernetes.io/projected/deda5c15-ffb4-44e6-9e27-465106737111-kube-api-access-tzqxb\") pod \"designate-operator-controller-manager-85d5d9dd78-sl4lq\" (UID: \"deda5c15-ffb4-44e6-9e27-465106737111\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994475 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7zxf\" (UniqueName: \"kubernetes.io/projected/68c3d701-56d2-4bce-8c6e-e1894084fecf-kube-api-access-z7zxf\") pod \"glance-operator-controller-manager-84b9b84486-8hnvm\" (UID: \"68c3d701-56d2-4bce-8c6e-e1894084fecf\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994516 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994527 4664 reconciler_common.go:293] "Volume 
detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89af999e-7634-484d-8c93-8a9abd790204-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.994537 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdzgh\" (UniqueName: \"kubernetes.io/projected/89af999e-7634-484d-8c93-8a9abd790204-kube-api-access-vdzgh\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.996781 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w"] Oct 13 07:00:15 crc kubenswrapper[4664]: I1013 07:00:15.997706 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.000892 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-crmx8" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.005966 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.006835 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.011651 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.026873 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.027489 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-2vst8" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.044973 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.045967 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.048781 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-8k5lf" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.056687 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.063257 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.074819 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.077871 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.081912 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-gxhfl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.092757 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.093879 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.098776 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-s8hnw" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.099091 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101031 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlzms\" (UniqueName: \"kubernetes.io/projected/3c368f1e-93f0-440a-ad95-d205dd78e4b2-kube-api-access-nlzms\") pod \"cinder-operator-controller-manager-7b7fb68549-zptb2\" (UID: \"3c368f1e-93f0-440a-ad95-d205dd78e4b2\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101266 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sgd9\" (UniqueName: \"kubernetes.io/projected/bd3c97ad-17c7-47d7-ae5e-1a67c489c142-kube-api-access-5sgd9\") pod \"heat-operator-controller-manager-858f76bbdd-ncsms\" (UID: \"bd3c97ad-17c7-47d7-ae5e-1a67c489c142\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101435 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzqxb\" (UniqueName: \"kubernetes.io/projected/deda5c15-ffb4-44e6-9e27-465106737111-kube-api-access-tzqxb\") pod \"designate-operator-controller-manager-85d5d9dd78-sl4lq\" (UID: \"deda5c15-ffb4-44e6-9e27-465106737111\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101546 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7zxf\" (UniqueName: \"kubernetes.io/projected/68c3d701-56d2-4bce-8c6e-e1894084fecf-kube-api-access-z7zxf\") pod \"glance-operator-controller-manager-84b9b84486-8hnvm\" (UID: \"68c3d701-56d2-4bce-8c6e-e1894084fecf\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101635 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks6p6\" (UniqueName: \"kubernetes.io/projected/a096a4c5-5890-4100-8462-ec39d621ff38-kube-api-access-ks6p6\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101721 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5g7sz\" (UniqueName: \"kubernetes.io/projected/9f6b6a3e-5706-49fa-aafa-49f68b19997e-kube-api-access-5g7sz\") pod \"barbican-operator-controller-manager-658bdf4b74-m4bpt\" (UID: \"9f6b6a3e-5706-49fa-aafa-49f68b19997e\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101813 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.101892 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xcxq\" (UniqueName: \"kubernetes.io/projected/5daf4cb7-d305-4408-97d8-9645cd4e61d5-kube-api-access-9xcxq\") pod \"horizon-operator-controller-manager-7ffbcb7588-s6ftn\" (UID: \"5daf4cb7-d305-4408-97d8-9645cd4e61d5\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.103294 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.120533 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.122068 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.132171 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g7sz\" (UniqueName: \"kubernetes.io/projected/9f6b6a3e-5706-49fa-aafa-49f68b19997e-kube-api-access-5g7sz\") pod \"barbican-operator-controller-manager-658bdf4b74-m4bpt\" (UID: \"9f6b6a3e-5706-49fa-aafa-49f68b19997e\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.132526 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-kwtp2" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.137309 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.138296 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7zxf\" (UniqueName: \"kubernetes.io/projected/68c3d701-56d2-4bce-8c6e-e1894084fecf-kube-api-access-z7zxf\") pod \"glance-operator-controller-manager-84b9b84486-8hnvm\" (UID: \"68c3d701-56d2-4bce-8c6e-e1894084fecf\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.152103 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzqxb\" (UniqueName: \"kubernetes.io/projected/deda5c15-ffb4-44e6-9e27-465106737111-kube-api-access-tzqxb\") pod \"designate-operator-controller-manager-85d5d9dd78-sl4lq\" (UID: \"deda5c15-ffb4-44e6-9e27-465106737111\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.156966 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-mcst5"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.158600 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.168719 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-wtf76" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.170294 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlzms\" (UniqueName: \"kubernetes.io/projected/3c368f1e-93f0-440a-ad95-d205dd78e4b2-kube-api-access-nlzms\") pod \"cinder-operator-controller-manager-7b7fb68549-zptb2\" (UID: \"3c368f1e-93f0-440a-ad95-d205dd78e4b2\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.173522 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.213750 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks6p6\" (UniqueName: \"kubernetes.io/projected/a096a4c5-5890-4100-8462-ec39d621ff38-kube-api-access-ks6p6\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.213829 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vpd2\" (UniqueName: \"kubernetes.io/projected/de55f4c7-2413-4a00-8691-a9545525fc88-kube-api-access-4vpd2\") pod \"ironic-operator-controller-manager-9c5c78d49-srnhl\" (UID: \"de55f4c7-2413-4a00-8691-a9545525fc88\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.213859 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.213884 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xcxq\" (UniqueName: \"kubernetes.io/projected/5daf4cb7-d305-4408-97d8-9645cd4e61d5-kube-api-access-9xcxq\") pod \"horizon-operator-controller-manager-7ffbcb7588-s6ftn\" (UID: \"5daf4cb7-d305-4408-97d8-9645cd4e61d5\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.213977 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn6vx\" (UniqueName: \"kubernetes.io/projected/5367fac7-2b9f-4745-b3cb-4accdf26ef59-kube-api-access-qn6vx\") pod \"manila-operator-controller-manager-5f67fbc655-6mlnb\" (UID: \"5367fac7-2b9f-4745-b3cb-4accdf26ef59\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.214045 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nqm9\" (UniqueName: \"kubernetes.io/projected/bf55d8b4-9315-48b2-962c-318911833b6f-kube-api-access-9nqm9\") pod \"keystone-operator-controller-manager-55b6b7c7b8-f57sl\" (UID: \"bf55d8b4-9315-48b2-962c-318911833b6f\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.214069 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sgd9\" (UniqueName: \"kubernetes.io/projected/bd3c97ad-17c7-47d7-ae5e-1a67c489c142-kube-api-access-5sgd9\") pod \"heat-operator-controller-manager-858f76bbdd-ncsms\" (UID: \"bd3c97ad-17c7-47d7-ae5e-1a67c489c142\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.214399 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxjw9\" (UniqueName: 
\"kubernetes.io/projected/a76ae989-9e97-43ee-a38f-ebb30be19ab6-kube-api-access-jxjw9\") pod \"mariadb-operator-controller-manager-f9fb45f8f-f2nxl\" (UID: \"a76ae989-9e97-43ee-a38f-ebb30be19ab6\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.227702 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.228244 4664 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.228301 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert podName:a096a4c5-5890-4100-8462-ec39d621ff38 nodeName:}" failed. No retries permitted until 2025-10-13 07:00:16.728286556 +0000 UTC m=+824.415731748 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert") pod "infra-operator-controller-manager-656bcbd775-22k5w" (UID: "a096a4c5-5890-4100-8462-ec39d621ff38") : secret "infra-operator-webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.235684 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-mcst5"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.280650 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.282014 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.287340 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-h9767" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.293609 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xcxq\" (UniqueName: \"kubernetes.io/projected/5daf4cb7-d305-4408-97d8-9645cd4e61d5-kube-api-access-9xcxq\") pod \"horizon-operator-controller-manager-7ffbcb7588-s6ftn\" (UID: \"5daf4cb7-d305-4408-97d8-9645cd4e61d5\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.305874 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.311861 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sgd9\" (UniqueName: \"kubernetes.io/projected/bd3c97ad-17c7-47d7-ae5e-1a67c489c142-kube-api-access-5sgd9\") pod \"heat-operator-controller-manager-858f76bbdd-ncsms\" (UID: \"bd3c97ad-17c7-47d7-ae5e-1a67c489c142\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.318864 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.320001 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.321964 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vp7xx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328048 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vpd2\" (UniqueName: \"kubernetes.io/projected/de55f4c7-2413-4a00-8691-a9545525fc88-kube-api-access-4vpd2\") pod \"ironic-operator-controller-manager-9c5c78d49-srnhl\" (UID: \"de55f4c7-2413-4a00-8691-a9545525fc88\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328119 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn6vx\" (UniqueName: \"kubernetes.io/projected/5367fac7-2b9f-4745-b3cb-4accdf26ef59-kube-api-access-qn6vx\") pod \"manila-operator-controller-manager-5f67fbc655-6mlnb\" (UID: \"5367fac7-2b9f-4745-b3cb-4accdf26ef59\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328142 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv6zs\" (UniqueName: \"kubernetes.io/projected/b28d60cb-ff14-4b64-b7b0-3af252c60311-kube-api-access-cv6zs\") pod \"nova-operator-controller-manager-5df598886f-mcst5\" (UID: \"b28d60cb-ff14-4b64-b7b0-3af252c60311\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328178 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-9nqm9\" (UniqueName: \"kubernetes.io/projected/bf55d8b4-9315-48b2-962c-318911833b6f-kube-api-access-9nqm9\") pod \"keystone-operator-controller-manager-55b6b7c7b8-f57sl\" (UID: \"bf55d8b4-9315-48b2-962c-318911833b6f\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328204 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv2rj\" (UniqueName: \"kubernetes.io/projected/91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026-kube-api-access-lv2rj\") pod \"neutron-operator-controller-manager-79d585cb66-vxchx\" (UID: \"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.328224 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxjw9\" (UniqueName: \"kubernetes.io/projected/a76ae989-9e97-43ee-a38f-ebb30be19ab6-kube-api-access-jxjw9\") pod \"mariadb-operator-controller-manager-f9fb45f8f-f2nxl\" (UID: \"a76ae989-9e97-43ee-a38f-ebb30be19ab6\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.336956 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks6p6\" (UniqueName: \"kubernetes.io/projected/a096a4c5-5890-4100-8462-ec39d621ff38-kube-api-access-ks6p6\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.340347 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.341355 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.344575 4664 generic.go:334] "Generic (PLEG): container finished" podID="89af999e-7634-484d-8c93-8a9abd790204" containerID="5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3" exitCode=0 Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.344901 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rc445" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="registry-server" containerID="cri-o://85e635836363e4798c3da2e76ef85fcb93b84152ac0c00e282324b159bc91730" gracePeriod=2 Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.344949 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerDied","Data":"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3"} Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.344990 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c58kd" event={"ID":"89af999e-7634-484d-8c93-8a9abd790204","Type":"ContainerDied","Data":"1957b8dd8ff370f1f683e4cda3bcb97dd1266c90467412a8fef9333d8b848669"} Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.345009 4664 scope.go:117] "RemoveContainer" containerID="5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.345091 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c58kd" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.351182 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-z72hj" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.365934 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.367112 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.377930 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nqm9\" (UniqueName: \"kubernetes.io/projected/bf55d8b4-9315-48b2-962c-318911833b6f-kube-api-access-9nqm9\") pod \"keystone-operator-controller-manager-55b6b7c7b8-f57sl\" (UID: \"bf55d8b4-9315-48b2-962c-318911833b6f\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.380498 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxjw9\" (UniqueName: \"kubernetes.io/projected/a76ae989-9e97-43ee-a38f-ebb30be19ab6-kube-api-access-jxjw9\") pod \"mariadb-operator-controller-manager-f9fb45f8f-f2nxl\" (UID: \"a76ae989-9e97-43ee-a38f-ebb30be19ab6\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.381260 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.389453 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-mrqxv" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.392931 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.393056 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.400679 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vpd2\" (UniqueName: \"kubernetes.io/projected/de55f4c7-2413-4a00-8691-a9545525fc88-kube-api-access-4vpd2\") pod \"ironic-operator-controller-manager-9c5c78d49-srnhl\" (UID: \"de55f4c7-2413-4a00-8691-a9545525fc88\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.400834 4664 scope.go:117] "RemoveContainer" containerID="cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.417985 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.419434 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn6vx\" (UniqueName: \"kubernetes.io/projected/5367fac7-2b9f-4745-b3cb-4accdf26ef59-kube-api-access-qn6vx\") pod \"manila-operator-controller-manager-5f67fbc655-6mlnb\" (UID: \"5367fac7-2b9f-4745-b3cb-4accdf26ef59\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.422825 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.424844 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.425966 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.428878 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljdv5\" (UniqueName: \"kubernetes.io/projected/b371ef2b-6ffd-4759-8a02-279705b4a4d3-kube-api-access-ljdv5\") pod \"ovn-operator-controller-manager-79df5fb58c-jfsd4\" (UID: \"b371ef2b-6ffd-4759-8a02-279705b4a4d3\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.428946 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv6zs\" (UniqueName: \"kubernetes.io/projected/b28d60cb-ff14-4b64-b7b0-3af252c60311-kube-api-access-cv6zs\") pod \"nova-operator-controller-manager-5df598886f-mcst5\" (UID: \"b28d60cb-ff14-4b64-b7b0-3af252c60311\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.428980 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6vtv\" (UniqueName: \"kubernetes.io/projected/ccadedcb-9722-4c86-9b22-17d4f9ce1cd7-kube-api-access-h6vtv\") pod \"octavia-operator-controller-manager-69fdcfc5f5-nn6ql\" (UID: \"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7\") " pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.429006 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv2rj\" (UniqueName: \"kubernetes.io/projected/91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026-kube-api-access-lv2rj\") pod \"neutron-operator-controller-manager-79d585cb66-vxchx\" (UID: \"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.429381 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-zqw5s" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.432222 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.442234 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.453638 4664 scope.go:117] "RemoveContainer" containerID="3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.454455 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv6zs\" (UniqueName: \"kubernetes.io/projected/b28d60cb-ff14-4b64-b7b0-3af252c60311-kube-api-access-cv6zs\") pod \"nova-operator-controller-manager-5df598886f-mcst5\" (UID: \"b28d60cb-ff14-4b64-b7b0-3af252c60311\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.464032 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv2rj\" (UniqueName: \"kubernetes.io/projected/91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026-kube-api-access-lv2rj\") pod \"neutron-operator-controller-manager-79d585cb66-vxchx\" (UID: \"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.472334 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.486933 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.528279 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530596 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6vtv\" (UniqueName: \"kubernetes.io/projected/ccadedcb-9722-4c86-9b22-17d4f9ce1cd7-kube-api-access-h6vtv\") pod \"octavia-operator-controller-manager-69fdcfc5f5-nn6ql\" (UID: \"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7\") " pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530632 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m6h2\" (UniqueName: \"kubernetes.io/projected/0e469825-8f36-48dc-8dce-dcbdaf6ca58c-kube-api-access-4m6h2\") pod \"swift-operator-controller-manager-db6d7f97b-6qvrk\" (UID: \"0e469825-8f36-48dc-8dce-dcbdaf6ca58c\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530655 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfgv9\" (UniqueName: \"kubernetes.io/projected/24e25e0a-a138-41fb-b90e-08d800f751b4-kube-api-access-mfgv9\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530725 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dh27\" (UniqueName: \"kubernetes.io/projected/86b8e5cb-18d9-4931-afb3-4f8dc9f788f0-kube-api-access-8dh27\") pod 
\"placement-operator-controller-manager-68b6c87b68-fl4ff\" (UID: \"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530747 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.530766 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljdv5\" (UniqueName: \"kubernetes.io/projected/b371ef2b-6ffd-4759-8a02-279705b4a4d3-kube-api-access-ljdv5\") pod \"ovn-operator-controller-manager-79df5fb58c-jfsd4\" (UID: \"b371ef2b-6ffd-4759-8a02-279705b4a4d3\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.531052 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.531229 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.554178 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.555757 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.567454 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljdv5\" (UniqueName: \"kubernetes.io/projected/b371ef2b-6ffd-4759-8a02-279705b4a4d3-kube-api-access-ljdv5\") pod \"ovn-operator-controller-manager-79df5fb58c-jfsd4\" (UID: \"b371ef2b-6ffd-4759-8a02-279705b4a4d3\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.570468 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-z52wm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.572621 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.583487 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.613095 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6vtv\" (UniqueName: \"kubernetes.io/projected/ccadedcb-9722-4c86-9b22-17d4f9ce1cd7-kube-api-access-h6vtv\") pod \"octavia-operator-controller-manager-69fdcfc5f5-nn6ql\" (UID: \"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7\") " pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.619139 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.620383 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.620846 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.627374 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-qpfjt" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.634716 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m6h2\" (UniqueName: \"kubernetes.io/projected/0e469825-8f36-48dc-8dce-dcbdaf6ca58c-kube-api-access-4m6h2\") pod \"swift-operator-controller-manager-db6d7f97b-6qvrk\" (UID: \"0e469825-8f36-48dc-8dce-dcbdaf6ca58c\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.634745 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfgv9\" (UniqueName: \"kubernetes.io/projected/24e25e0a-a138-41fb-b90e-08d800f751b4-kube-api-access-mfgv9\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.634788 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dh27\" (UniqueName: \"kubernetes.io/projected/86b8e5cb-18d9-4931-afb3-4f8dc9f788f0-kube-api-access-8dh27\") pod \"placement-operator-controller-manager-68b6c87b68-fl4ff\" (UID: \"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.634847 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.635365 4664 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.635398 4664 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert podName:24e25e0a-a138-41fb-b90e-08d800f751b4 nodeName:}" failed. No retries permitted until 2025-10-13 07:00:17.135386199 +0000 UTC m=+824.822831381 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert") pod "openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" (UID: "24e25e0a-a138-41fb-b90e-08d800f751b4") : secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.640388 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.640677 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.656889 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.656927 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.657541 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.657930 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.667038 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-pdfm7" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.673511 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.700592 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfgv9\" (UniqueName: \"kubernetes.io/projected/24e25e0a-a138-41fb-b90e-08d800f751b4-kube-api-access-mfgv9\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.702644 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dh27\" (UniqueName: \"kubernetes.io/projected/86b8e5cb-18d9-4931-afb3-4f8dc9f788f0-kube-api-access-8dh27\") pod \"placement-operator-controller-manager-68b6c87b68-fl4ff\" (UID: \"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.716645 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m6h2\" (UniqueName: \"kubernetes.io/projected/0e469825-8f36-48dc-8dce-dcbdaf6ca58c-kube-api-access-4m6h2\") pod \"swift-operator-controller-manager-db6d7f97b-6qvrk\" (UID: 
\"0e469825-8f36-48dc-8dce-dcbdaf6ca58c\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.716785 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.728253 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c58kd"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.738078 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.740669 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.741425 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.741481 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6smqn\" (UniqueName: \"kubernetes.io/projected/b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07-kube-api-access-6smqn\") pod \"telemetry-operator-controller-manager-67cfc6749b-8mv7j\" (UID: \"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.741516 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx9w9\" (UniqueName: \"kubernetes.io/projected/f837553e-b572-4dcc-91b4-a8e6c2deb097-kube-api-access-zx9w9\") pod \"test-operator-controller-manager-5458f77c4-7vhzw\" (UID: \"f837553e-b572-4dcc-91b4-a8e6c2deb097\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.762935 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.763045 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-vrkr7" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.763197 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.775397 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a096a4c5-5890-4100-8462-ec39d621ff38-cert\") pod \"infra-operator-controller-manager-656bcbd775-22k5w\" (UID: \"a096a4c5-5890-4100-8462-ec39d621ff38\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.872123 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx9w9\" (UniqueName: \"kubernetes.io/projected/f837553e-b572-4dcc-91b4-a8e6c2deb097-kube-api-access-zx9w9\") pod 
\"test-operator-controller-manager-5458f77c4-7vhzw\" (UID: \"f837553e-b572-4dcc-91b4-a8e6c2deb097\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.872217 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdpdd\" (UniqueName: \"kubernetes.io/projected/6647aa13-3608-4eeb-87b7-26741b9c2a6f-kube-api-access-jdpdd\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.872300 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dbnf\" (UniqueName: \"kubernetes.io/projected/3ae920d7-c605-4984-a072-dad04b3cc6cc-kube-api-access-8dbnf\") pod \"watcher-operator-controller-manager-7f554bff7b-798km\" (UID: \"3ae920d7-c605-4984-a072-dad04b3cc6cc\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.872323 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.872348 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6smqn\" (UniqueName: \"kubernetes.io/projected/b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07-kube-api-access-6smqn\") pod \"telemetry-operator-controller-manager-67cfc6749b-8mv7j\" (UID: \"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.876270 4664 scope.go:117] "RemoveContainer" containerID="5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3" Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.877773 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3\": container with ID starting with 5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3 not found: ID does not exist" containerID="5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.877868 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3"} err="failed to get container status \"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3\": rpc error: code = NotFound desc = could not find container \"5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3\": container with ID starting with 5fb3e0e759be64d338b5acada1c21bc9ec807e4cd76fd1ef0564ca8cf716a4e3 not found: ID does not exist" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.878013 4664 scope.go:117] "RemoveContainer" containerID="cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0" Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.878383 4664 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0\": container with ID starting with cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0 not found: ID does not exist" containerID="cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.878421 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0"} err="failed to get container status \"cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0\": rpc error: code = NotFound desc = could not find container \"cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0\": container with ID starting with cad371a2af4db15ae7d6e1f68fe795623f02f5f3e6575ee0d83f903e71a9bef0 not found: ID does not exist" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.878446 4664 scope.go:117] "RemoveContainer" containerID="3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.886271 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx"] Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.888391 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.902564 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx"] Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.908439 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4\": container with ID starting with 3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4 not found: ID does not exist" containerID="3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.908487 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4"} err="failed to get container status \"3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4\": rpc error: code = NotFound desc = could not find container \"3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4\": container with ID starting with 3aed825491812c5fb890bc61274913f5df2d7d4d3b7bdd27a8515fa8480591d4 not found: ID does not exist" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.908998 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-4dmvb" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.921506 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx9w9\" (UniqueName: \"kubernetes.io/projected/f837553e-b572-4dcc-91b4-a8e6c2deb097-kube-api-access-zx9w9\") pod \"test-operator-controller-manager-5458f77c4-7vhzw\" (UID: \"f837553e-b572-4dcc-91b4-a8e6c2deb097\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.922003 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6smqn\" (UniqueName: \"kubernetes.io/projected/b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07-kube-api-access-6smqn\") pod \"telemetry-operator-controller-manager-67cfc6749b-8mv7j\" (UID: \"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.929201 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.957400 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.963678 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.973532 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdpdd\" (UniqueName: \"kubernetes.io/projected/6647aa13-3608-4eeb-87b7-26741b9c2a6f-kube-api-access-jdpdd\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.973624 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dbnf\" (UniqueName: \"kubernetes.io/projected/3ae920d7-c605-4984-a072-dad04b3cc6cc-kube-api-access-8dbnf\") pod \"watcher-operator-controller-manager-7f554bff7b-798km\" (UID: \"3ae920d7-c605-4984-a072-dad04b3cc6cc\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.973647 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.973762 4664 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: E1013 07:00:16.973883 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert podName:6647aa13-3608-4eeb-87b7-26741b9c2a6f nodeName:}" failed. No retries permitted until 2025-10-13 07:00:17.4738672 +0000 UTC m=+825.161312402 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert") pod "openstack-operator-controller-manager-5b95c8954b-w8mlm" (UID: "6647aa13-3608-4eeb-87b7-26741b9c2a6f") : secret "webhook-server-cert" not found Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.986605 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" Oct 13 07:00:16 crc kubenswrapper[4664]: I1013 07:00:16.995462 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdpdd\" (UniqueName: \"kubernetes.io/projected/6647aa13-3608-4eeb-87b7-26741b9c2a6f-kube-api-access-jdpdd\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.004575 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dbnf\" (UniqueName: \"kubernetes.io/projected/3ae920d7-c605-4984-a072-dad04b3cc6cc-kube-api-access-8dbnf\") pod \"watcher-operator-controller-manager-7f554bff7b-798km\" (UID: \"3ae920d7-c605-4984-a072-dad04b3cc6cc\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.016559 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.029158 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.074660 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69jpz\" (UniqueName: \"kubernetes.io/projected/3ee22f32-c675-449d-bdb9-670673cf57b4-kube-api-access-69jpz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx\" (UID: \"3ee22f32-c675-449d-bdb9-670673cf57b4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.101282 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89af999e-7634-484d-8c93-8a9abd790204" path="/var/lib/kubelet/pods/89af999e-7634-484d-8c93-8a9abd790204/volumes" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.179075 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69jpz\" (UniqueName: \"kubernetes.io/projected/3ee22f32-c675-449d-bdb9-670673cf57b4-kube-api-access-69jpz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx\" (UID: \"3ee22f32-c675-449d-bdb9-670673cf57b4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.179193 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.200772 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/24e25e0a-a138-41fb-b90e-08d800f751b4-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7b75c7q\" (UID: \"24e25e0a-a138-41fb-b90e-08d800f751b4\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:17 crc 
kubenswrapper[4664]: I1013 07:00:17.204593 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69jpz\" (UniqueName: \"kubernetes.io/projected/3ee22f32-c675-449d-bdb9-670673cf57b4-kube-api-access-69jpz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx\" (UID: \"3ee22f32-c675-449d-bdb9-670673cf57b4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.238844 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.304764 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.370452 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.384135 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.406371 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.422708 4664 generic.go:334] "Generic (PLEG): container finished" podID="73874794-d495-48e3-9087-8c37dc2dd70c" containerID="85e635836363e4798c3da2e76ef85fcb93b84152ac0c00e282324b159bc91730" exitCode=0 Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.422771 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerDied","Data":"85e635836363e4798c3da2e76ef85fcb93b84152ac0c00e282324b159bc91730"} Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.490358 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.496283 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6647aa13-3608-4eeb-87b7-26741b9c2a6f-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-w8mlm\" (UID: \"6647aa13-3608-4eeb-87b7-26741b9c2a6f\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.566826 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.596856 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.656494 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.669352 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.706166 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:17 crc kubenswrapper[4664]: W1013 07:00:17.709991 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91d09e1b_c6e8_4a4b_a4a6_ecb48fa42026.slice/crio-4cca023d79d2c6064085461202ade5c7ec7adc0d924e60685edf50549d1c1dc1 WatchSource:0}: Error finding container 4cca023d79d2c6064085461202ade5c7ec7adc0d924e60685edf50549d1c1dc1: Status 404 returned error can't find the container with id 4cca023d79d2c6064085461202ade5c7ec7adc0d924e60685edf50549d1c1dc1 Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.800680 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities\") pod \"73874794-d495-48e3-9087-8c37dc2dd70c\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.800718 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rd6b2\" (UniqueName: \"kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2\") pod \"73874794-d495-48e3-9087-8c37dc2dd70c\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.800741 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content\") pod \"73874794-d495-48e3-9087-8c37dc2dd70c\" (UID: \"73874794-d495-48e3-9087-8c37dc2dd70c\") " Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.805247 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities" (OuterVolumeSpecName: "utilities") pod "73874794-d495-48e3-9087-8c37dc2dd70c" (UID: "73874794-d495-48e3-9087-8c37dc2dd70c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.814756 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2" (OuterVolumeSpecName: "kube-api-access-rd6b2") pod "73874794-d495-48e3-9087-8c37dc2dd70c" (UID: "73874794-d495-48e3-9087-8c37dc2dd70c"). InnerVolumeSpecName "kube-api-access-rd6b2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.902694 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.902728 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rd6b2\" (UniqueName: \"kubernetes.io/projected/73874794-d495-48e3-9087-8c37dc2dd70c-kube-api-access-rd6b2\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.953190 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt"] Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.957004 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl"] Oct 13 07:00:17 crc kubenswrapper[4664]: W1013 07:00:17.958986 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f6b6a3e_5706_49fa_aafa_49f68b19997e.slice/crio-64e6d9804ed7fa974c1d113c889936962e038829b4aa5e6ece38a7c616cde29d WatchSource:0}: Error finding container 64e6d9804ed7fa974c1d113c889936962e038829b4aa5e6ece38a7c616cde29d: Status 404 returned error can't find the container with id 64e6d9804ed7fa974c1d113c889936962e038829b4aa5e6ece38a7c616cde29d Oct 13 07:00:17 crc kubenswrapper[4664]: W1013 07:00:17.963033 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda76ae989_9e97_43ee_a38f_ebb30be19ab6.slice/crio-4fb9dc08647bd5870c06739d916f97826a779d6b34c2d42779022293e5b34e58 WatchSource:0}: Error finding container 4fb9dc08647bd5870c06739d916f97826a779d6b34c2d42779022293e5b34e58: Status 404 returned error can't find the container with id 4fb9dc08647bd5870c06739d916f97826a779d6b34c2d42779022293e5b34e58 Oct 13 07:00:17 crc kubenswrapper[4664]: I1013 07:00:17.970786 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "73874794-d495-48e3-9087-8c37dc2dd70c" (UID: "73874794-d495-48e3-9087-8c37dc2dd70c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.004539 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73874794-d495-48e3-9087-8c37dc2dd70c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.186486 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.397258 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.406086 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.423095 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.446117 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" event={"ID":"bd3c97ad-17c7-47d7-ae5e-1a67c489c142","Type":"ContainerStarted","Data":"820a5110c9c66a7c11f65708b25ac12e91512096f349aecaa746061ac298f2ce"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.448142 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" event={"ID":"68c3d701-56d2-4bce-8c6e-e1894084fecf","Type":"ContainerStarted","Data":"adba6bc544b57d1ec358274cfcaf0ff916d5a4217f57e935d26481bac7d3a4d1"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.452951 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" event={"ID":"a76ae989-9e97-43ee-a38f-ebb30be19ab6","Type":"ContainerStarted","Data":"4fb9dc08647bd5870c06739d916f97826a779d6b34c2d42779022293e5b34e58"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.456387 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-mcst5"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.461592 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.474167 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" event={"ID":"5367fac7-2b9f-4745-b3cb-4accdf26ef59","Type":"ContainerStarted","Data":"5dc5c0a13f468f05e622408937759e17e22ed7b9fb3c3232d81ac38df05d7cb2"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.477574 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" event={"ID":"9f6b6a3e-5706-49fa-aafa-49f68b19997e","Type":"ContainerStarted","Data":"64e6d9804ed7fa974c1d113c889936962e038829b4aa5e6ece38a7c616cde29d"} Oct 13 07:00:18 crc kubenswrapper[4664]: W1013 07:00:18.478195 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb371ef2b_6ffd_4759_8a02_279705b4a4d3.slice/crio-deb569caf10ccf22d2e64919fbf2853c4c766d8cda8461c50bfc4106a4532a05 
WatchSource:0}: Error finding container deb569caf10ccf22d2e64919fbf2853c4c766d8cda8461c50bfc4106a4532a05: Status 404 returned error can't find the container with id deb569caf10ccf22d2e64919fbf2853c4c766d8cda8461c50bfc4106a4532a05 Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.482490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" event={"ID":"de55f4c7-2413-4a00-8691-a9545525fc88","Type":"ContainerStarted","Data":"1d2e68c4d3a8347061c0cb3f958be318c7a745bfe601fd3bac77372911e98c8c"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.527388 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rc445" event={"ID":"73874794-d495-48e3-9087-8c37dc2dd70c","Type":"ContainerDied","Data":"bb029e5323ebe14504044ba386bad68a245ef287ddc3004b5b7f732eb09768ed"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.527437 4664 scope.go:117] "RemoveContainer" containerID="85e635836363e4798c3da2e76ef85fcb93b84152ac0c00e282324b159bc91730" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.527520 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rc445" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.543809 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" event={"ID":"3c368f1e-93f0-440a-ad95-d205dd78e4b2","Type":"ContainerStarted","Data":"2a553f5b38068e709cc78b29a2cc3468aa31c3fe857af7e0235671d13127ef51"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.548185 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" event={"ID":"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026","Type":"ContainerStarted","Data":"4cca023d79d2c6064085461202ade5c7ec7adc0d924e60685edf50549d1c1dc1"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.563665 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" event={"ID":"bf55d8b4-9315-48b2-962c-318911833b6f","Type":"ContainerStarted","Data":"be0cedeab980902d36d2d121b353989ccb0954e9dbf9f11a72fd4d2df6bd73a7"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.580390 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" event={"ID":"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7","Type":"ContainerStarted","Data":"a8fc1f22ea6417bd6172cb62d54b4e52c61aa4809cbe146fbccdce078ae0ecd2"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.581725 4664 scope.go:117] "RemoveContainer" containerID="0f2419776bf92ada04a229d8247084990e42b7cf0f79a8892762f12131dedfb9" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.585594 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.591917 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.593300 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" 
event={"ID":"deda5c15-ffb4-44e6-9e27-465106737111","Type":"ContainerStarted","Data":"410a04526928c2bfb40d0fb26b7adc91f0ca0dcc6a11b4c34ad09c20b071b93c"} Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.612114 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rc445"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.631601 4664 scope.go:117] "RemoveContainer" containerID="1e38813547cb4d3edcb82c3d1c5e2388bd9bd5dc0dc591548b09cd44cf2a92ff" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.653774 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.664145 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km"] Oct 13 07:00:18 crc kubenswrapper[4664]: W1013 07:00:18.676487 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ae920d7_c605_4984_a072_dad04b3cc6cc.slice/crio-1510210f1a7ccca1f8d2b5a265f95713aac91a34874e4575e4ff7d0706395e53 WatchSource:0}: Error finding container 1510210f1a7ccca1f8d2b5a265f95713aac91a34874e4575e4ff7d0706395e53: Status 404 returned error can't find the container with id 1510210f1a7ccca1f8d2b5a265f95713aac91a34874e4575e4ff7d0706395e53 Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.717587 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ks6p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-656bcbd775-22k5w_openstack-operators(a096a4c5-5890-4100-8462-ec39d621ff38): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.736514 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff"] Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.740102 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zx9w9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
test-operator-controller-manager-5458f77c4-7vhzw_openstack-operators(f837553e-b572-4dcc-91b4-a8e6c2deb097): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.781459 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d33c1f507e1f5b9a4bf226ad98917e92101ac66b36e19d35cbe04ae7014f6bff,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8dh27,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-68b6c87b68-fl4ff_openstack-operators(86b8e5cb-18d9-4931-afb3-4f8dc9f788f0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.795002 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69jpz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx_openstack-operators(3ee22f32-c675-449d-bdb9-670673cf57b4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.796451 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" podUID="3ee22f32-c675-449d-bdb9-670673cf57b4" Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.809639 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.818657 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.875718 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.899065 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk"] Oct 13 07:00:18 crc kubenswrapper[4664]: I1013 07:00:18.907946 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm"] Oct 13 07:00:18 crc kubenswrapper[4664]: E1013 07:00:18.930218 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4m6h2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-db6d7f97b-6qvrk_openstack-operators(0e469825-8f36-48dc-8dce-dcbdaf6ca58c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.012739 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" podUID="f837553e-b572-4dcc-91b4-a8e6c2deb097" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.060445 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" path="/var/lib/kubelet/pods/73874794-d495-48e3-9087-8c37dc2dd70c/volumes" Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.072751 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" podUID="86b8e5cb-18d9-4931-afb3-4f8dc9f788f0" Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.092418 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" 
pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.152371 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" podUID="0e469825-8f36-48dc-8dce-dcbdaf6ca58c" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.606920 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" event={"ID":"b371ef2b-6ffd-4759-8a02-279705b4a4d3","Type":"ContainerStarted","Data":"deb569caf10ccf22d2e64919fbf2853c4c766d8cda8461c50bfc4106a4532a05"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.635585 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" event={"ID":"f837553e-b572-4dcc-91b4-a8e6c2deb097","Type":"ContainerStarted","Data":"c71b6cdaf213edc33a908d81ab629e9385cff691d0dc66992ac91f7e7ec0862e"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.635630 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" event={"ID":"f837553e-b572-4dcc-91b4-a8e6c2deb097","Type":"ContainerStarted","Data":"0b9decb98d920f890aefcedf147d9624d809710e9eeabf959a93fe7309bc274a"} Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.637750 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" podUID="f837553e-b572-4dcc-91b4-a8e6c2deb097" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.638254 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" event={"ID":"3ae920d7-c605-4984-a072-dad04b3cc6cc","Type":"ContainerStarted","Data":"1510210f1a7ccca1f8d2b5a265f95713aac91a34874e4575e4ff7d0706395e53"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.651407 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" event={"ID":"3ee22f32-c675-449d-bdb9-670673cf57b4","Type":"ContainerStarted","Data":"24f804011c44f054c1a4cafcd5bea4209987d857fb6ec7655d86651b9e86aa3e"} Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.664504 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" podUID="3ee22f32-c675-449d-bdb9-670673cf57b4" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.671908 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" event={"ID":"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07","Type":"ContainerStarted","Data":"8cd6142454f334e1a1324cb80c1784974783ae62ed7a5de5e46a28d8a3a59a17"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.673212 4664 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" event={"ID":"0e469825-8f36-48dc-8dce-dcbdaf6ca58c","Type":"ContainerStarted","Data":"7b6219c6a7978698b0ee7d91755dda97d25ec85317537f6fef016217f5599229"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.673236 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" event={"ID":"0e469825-8f36-48dc-8dce-dcbdaf6ca58c","Type":"ContainerStarted","Data":"30457a0cd8803fd0e107704e10aa6d3346e9f18bbd110a566096a7645ed43031"} Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.674906 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" podUID="0e469825-8f36-48dc-8dce-dcbdaf6ca58c" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.679424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" event={"ID":"24e25e0a-a138-41fb-b90e-08d800f751b4","Type":"ContainerStarted","Data":"da0d27ea6e8d87c09fff6f32a645fd5a81fe998b33c2480ea5e3873ce8742974"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.693944 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" event={"ID":"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0","Type":"ContainerStarted","Data":"41dcadc11ff85262c5a371ed8fc453965d2f78c237be3c311b10487ddd3e6395"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.693988 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" event={"ID":"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0","Type":"ContainerStarted","Data":"5a325e0ff0a6a616fa42edc9387dabd3222e0c222889cb3711362ec90ff20a53"} Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.699150 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d33c1f507e1f5b9a4bf226ad98917e92101ac66b36e19d35cbe04ae7014f6bff\\\"\"" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" podUID="86b8e5cb-18d9-4931-afb3-4f8dc9f788f0" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.702623 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" event={"ID":"b28d60cb-ff14-4b64-b7b0-3af252c60311","Type":"ContainerStarted","Data":"527281acc08027043c855b827af36d2d4c1cd9f98e16fdca5e99fb96563f63c4"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.710590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" event={"ID":"a096a4c5-5890-4100-8462-ec39d621ff38","Type":"ContainerStarted","Data":"5a7108efd3219bba5e72928af5c3a7e033a0dec6081a401c748c4ad66b207fc9"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.710636 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" 
event={"ID":"a096a4c5-5890-4100-8462-ec39d621ff38","Type":"ContainerStarted","Data":"0b6806591d6fe58c03e19a1221bc94c1cdac06bc1040faff41da53cf7afacd74"} Oct 13 07:00:19 crc kubenswrapper[4664]: E1013 07:00:19.716880 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.727709 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" event={"ID":"6647aa13-3608-4eeb-87b7-26741b9c2a6f","Type":"ContainerStarted","Data":"35dfb4145e98cd772fcae485663bfc51e3e29ce40470bbe7c4d11694d4de09b8"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.727755 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" event={"ID":"6647aa13-3608-4eeb-87b7-26741b9c2a6f","Type":"ContainerStarted","Data":"a22c9929702b7bd56b8164b0c633e00c55979b7cf0514c123839be916b99c3f0"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.727766 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" event={"ID":"6647aa13-3608-4eeb-87b7-26741b9c2a6f","Type":"ContainerStarted","Data":"e9e9a7b4824adfb84215eaca1492b3af63c7392a2cbde34658562a1323dd5742"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.728608 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.729549 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" event={"ID":"5daf4cb7-d305-4408-97d8-9645cd4e61d5","Type":"ContainerStarted","Data":"5ef67ead9947ebe151bf67b7f075bc4617e918e798198389977147554de52b75"} Oct 13 07:00:19 crc kubenswrapper[4664]: I1013 07:00:19.975543 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podStartSLOduration=3.975524654 podStartE2EDuration="3.975524654s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:00:19.969994607 +0000 UTC m=+827.657439819" watchObservedRunningTime="2025-10-13 07:00:19.975524654 +0000 UTC m=+827.662969846" Oct 13 07:00:20 crc kubenswrapper[4664]: E1013 07:00:20.785781 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" Oct 13 07:00:20 crc kubenswrapper[4664]: E1013 07:00:20.785901 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/swift-operator@sha256:4b4a17fe08ce00e375afaaec6a28835f5c1784f03d11c4558376ac04130f3a9e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" podUID="0e469825-8f36-48dc-8dce-dcbdaf6ca58c" Oct 13 07:00:20 crc kubenswrapper[4664]: E1013 07:00:20.785944 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" podUID="3ee22f32-c675-449d-bdb9-670673cf57b4" Oct 13 07:00:20 crc kubenswrapper[4664]: E1013 07:00:20.787152 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" podUID="f837553e-b572-4dcc-91b4-a8e6c2deb097" Oct 13 07:00:20 crc kubenswrapper[4664]: E1013 07:00:20.787185 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d33c1f507e1f5b9a4bf226ad98917e92101ac66b36e19d35cbe04ae7014f6bff\\\"\"" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" podUID="86b8e5cb-18d9-4931-afb3-4f8dc9f788f0" Oct 13 07:00:27 crc kubenswrapper[4664]: I1013 07:00:27.717602 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.812109 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.812461 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.812524 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.813437 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.813565 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" 
containerName="machine-config-daemon" containerID="cri-o://493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5" gracePeriod=600
Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.969120 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5" exitCode=0
Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.969698 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5"}
Oct 13 07:00:28 crc kubenswrapper[4664]: I1013 07:00:28.970469 4664 scope.go:117] "RemoveContainer" containerID="8bc911ca0dfde386d072e0e4fe8a1821b2571d5b4a57fb451b33ca409337c948"
Oct 13 07:00:33 crc kubenswrapper[4664]: E1013 07:00:33.395891 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:ec11cb8711bd1af22db3c84aa854349ee46191add3db45aecfabb1d8410c04d0"
Oct 13 07:00:33 crc kubenswrapper[4664]: E1013 07:00:33.396362 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:ec11cb8711bd1af22db3c84aa854349ee46191add3db45aecfabb1d8410c04d0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5sgd9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-858f76bbdd-ncsms_openstack-operators(bd3c97ad-17c7-47d7-ae5e-1a67c489c142): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:00:33 crc kubenswrapper[4664]: E1013 07:00:33.960211 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:33652e75a03a058769019fe8d8c51585a6eeefef5e1ecb96f9965434117954f2"
Oct 13 07:00:33 crc kubenswrapper[4664]: E1013 07:00:33.960683 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:33652e75a03a058769019fe8d8c51585a6eeefef5e1ecb96f9965434117954f2,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lv2rj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-79d585cb66-vxchx_openstack-operators(91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:00:34 crc kubenswrapper[4664]: E1013 07:00:34.911258 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca"
Oct 13 07:00:34 crc kubenswrapper[4664]: E1013 07:00:34.911552 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8dbnf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-7f554bff7b-798km_openstack-operators(3ae920d7-c605-4984-a072-dad04b3cc6cc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:00:36 crc kubenswrapper[4664]: E1013 07:00:36.308786 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:47278ed28e02df00892f941763aa0d69547327318e8a983e07f4577acd288167"
Oct 13 07:00:36 crc kubenswrapper[4664]: E1013 07:00:36.309275 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:47278ed28e02df00892f941763aa0d69547327318e8a983e07f4577acd288167,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jxjw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-f9fb45f8f-f2nxl_openstack-operators(a76ae989-9e97-43ee-a38f-ebb30be19ab6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:00:36 crc kubenswrapper[4664]: E1013 07:00:36.973300 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867"
Oct 13 07:00:36 crc kubenswrapper[4664]: E1013 07:00:36.973609 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9xcxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-7ffbcb7588-s6ftn_openstack-operators(5daf4cb7-d305-4408-97d8-9645cd4e61d5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:00:42 crc kubenswrapper[4664]: E1013 07:00:42.399483 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" podUID="bd3c97ad-17c7-47d7-ae5e-1a67c489c142"
Oct 13 07:00:42 crc kubenswrapper[4664]: E1013 07:00:42.400086 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" podUID="a76ae989-9e97-43ee-a38f-ebb30be19ab6"
Oct 13 07:00:42 crc kubenswrapper[4664]: E1013 07:00:42.461948 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5"
Oct 13 07:00:42 crc kubenswrapper[4664]: E1013 07:00:42.715104 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc"
Oct 13 07:00:42 crc kubenswrapper[4664]: E1013 07:00:42.725035 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" podUID="91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.104731 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" event={"ID":"b371ef2b-6ffd-4759-8a02-279705b4a4d3","Type":"ContainerStarted","Data":"1f1853d3519c9dd69ca1ff9fc1dd636ebf78044a7d21f447afa0788a9b9faa77"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.104767 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" event={"ID":"b371ef2b-6ffd-4759-8a02-279705b4a4d3","Type":"ContainerStarted","Data":"21249b0a7280bbacc706f2109ae747bc89e9fefb08c0db3a82bad4a0844d58b0"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.105789 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.117058 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" event={"ID":"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026","Type":"ContainerStarted","Data":"2090453ce30b8f455957a88814f44c5d9e639979e75f6962f926d2b0766fc67e"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.126634 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" event={"ID":"9f6b6a3e-5706-49fa-aafa-49f68b19997e","Type":"ContainerStarted","Data":"ae4ca4b144492899668ae18121e30708095346339ae471c87fe60d6a8e6b96e8"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.130918 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" event={"ID":"a76ae989-9e97-43ee-a38f-ebb30be19ab6","Type":"ContainerStarted","Data":"cdb2e8f4d16b2b0b9238f2ae67a8d19d85e6951d6f7b5074287a0e14b2f81b99"}
Oct 13 07:00:43 crc kubenswrapper[4664]: E1013 07:00:43.132471 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:47278ed28e02df00892f941763aa0d69547327318e8a983e07f4577acd288167\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" podUID="a76ae989-9e97-43ee-a38f-ebb30be19ab6"
Oct 13 07:00:43 crc kubenswrapper[4664]: E1013 07:00:43.132631 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:33652e75a03a058769019fe8d8c51585a6eeefef5e1ecb96f9965434117954f2\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" podUID="91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.142737 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" podStartSLOduration=7.090477594 podStartE2EDuration="27.142725394s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.483819044 +0000 UTC m=+826.171264236" lastFinishedPulling="2025-10-13 07:00:38.536066834 +0000 UTC m=+846.223512036" observedRunningTime="2025-10-13 07:00:43.139865998 +0000 UTC m=+850.827311200" watchObservedRunningTime="2025-10-13 07:00:43.142725394 +0000 UTC m=+850.830170586"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.143103 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" event={"ID":"5daf4cb7-d305-4408-97d8-9645cd4e61d5","Type":"ContainerStarted","Data":"f6f1dcb932eaa02ee08d2e789b3eed80c0fa0e1c922cab1338e4d1b3f3cf97eb"}
Oct 13 07:00:43 crc kubenswrapper[4664]: E1013 07:00:43.157177 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.162651 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" event={"ID":"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7","Type":"ContainerStarted","Data":"77db77a51da1e9342c6da3ca79d257bb42070e7d63c23584cd5fd8e29899ee4c"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.174179 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1"}
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.230382 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" event={"ID":"3ae920d7-c605-4984-a072-dad04b3cc6cc","Type":"ContainerStarted","Data":"476a91e0da1fdf1b14c05a2e354c08bfe17cf26dd34f5e2ffed0bc5c0a8ef95d"}
Oct 13 07:00:43 crc kubenswrapper[4664]: E1013 07:00:43.232279 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc"
Oct 13 07:00:43 crc kubenswrapper[4664]: I1013 07:00:43.258924 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" event={"ID":"bd3c97ad-17c7-47d7-ae5e-1a67c489c142","Type":"ContainerStarted","Data":"1b58f0479d6e1d6ce7e8c8a0d582d70339f45e1f230d2dfcf5eaff41bc498098"}
Oct 13 07:00:43 crc kubenswrapper[4664]: E1013 07:00:43.267311 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:ec11cb8711bd1af22db3c84aa854349ee46191add3db45aecfabb1d8410c04d0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" podUID="bd3c97ad-17c7-47d7-ae5e-1a67c489c142"
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.276207 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" event={"ID":"a096a4c5-5890-4100-8462-ec39d621ff38","Type":"ContainerStarted","Data":"55ef490648c0c6b37798a9c565036ce90bb147d6b2394698d757ee02eb6fc400"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.278264 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" event={"ID":"deda5c15-ffb4-44e6-9e27-465106737111","Type":"ContainerStarted","Data":"49fc7b66ef6f3cb7e9341c4deaf7bb0f67f579a42adbf606a314251d9ff1f5cd"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.288413 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" event={"ID":"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07","Type":"ContainerStarted","Data":"e7e5efa4cde6a49acd0cb8dbb881557e31bcaf15f6a76e10566346254639f790"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.300696 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" event={"ID":"68c3d701-56d2-4bce-8c6e-e1894084fecf","Type":"ContainerStarted","Data":"07b530820d0483e3ce059b9da4a04b0a874de32bdef0045dcb5d0dbc993934d7"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.306397 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" event={"ID":"5367fac7-2b9f-4745-b3cb-4accdf26ef59","Type":"ContainerStarted","Data":"7a3b5f8af31329d5afa9a968818887be3a347ea510637a2f6a392abc7dd2d216"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.310661 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" event={"ID":"de55f4c7-2413-4a00-8691-a9545525fc88","Type":"ContainerStarted","Data":"ae4704bab7ef0a5f52ccbdb832a533065fa68f85c3767a2c6f4d91d0b1916f36"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.325441 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" event={"ID":"bf55d8b4-9315-48b2-962c-318911833b6f","Type":"ContainerStarted","Data":"7a2d307a061065722b20491f061e9ad75888b6464922559f75c7a96f4aa937e3"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.330377 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" event={"ID":"b28d60cb-ff14-4b64-b7b0-3af252c60311","Type":"ContainerStarted","Data":"aec4a3b5e9f0a38ade01f0b62a0cc7c10509ca7442a69b41090474672dbccd41"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.336041 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" event={"ID":"0e469825-8f36-48dc-8dce-dcbdaf6ca58c","Type":"ContainerStarted","Data":"841fd00597720530b19ad457d04c3a52ec59f9483f3d29651da5332c4b3eeb5d"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.336733 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk"
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.354973 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" event={"ID":"86b8e5cb-18d9-4931-afb3-4f8dc9f788f0","Type":"ContainerStarted","Data":"5c883890b2ddf29406a06b85828340fadc139bc127f03207c3de7ca2d8df956c"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.355666 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff"
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.367631 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" event={"ID":"3c368f1e-93f0-440a-ad95-d205dd78e4b2","Type":"ContainerStarted","Data":"b1da417165b586fbfdb4fca48f4174c1229504a81e14304c17da1941e7af8ede"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.374856 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" event={"ID":"f837553e-b572-4dcc-91b4-a8e6c2deb097","Type":"ContainerStarted","Data":"254fe49e337dea2f7889706e4f3b466f76b01e39144836a8bdc35304049ba3d4"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.375705 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw"
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.382715 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" event={"ID":"3ee22f32-c675-449d-bdb9-670673cf57b4","Type":"ContainerStarted","Data":"757bc9ddfa72713da3b207b5b68224387808b643f8f81c15ed4b304d425df453"}
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.391580 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" event={"ID":"24e25e0a-a138-41fb-b90e-08d800f751b4","Type":"ContainerStarted","Data":"0ed3c71e6d2cd7ed7a30ae9f8a941420f07911d93b25fb56ad94583154796d2b"}
Oct 13 07:00:44 crc kubenswrapper[4664]: E1013 07:00:44.397322 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:47278ed28e02df00892f941763aa0d69547327318e8a983e07f4577acd288167\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" podUID="a76ae989-9e97-43ee-a38f-ebb30be19ab6"
Oct 13 07:00:44 crc kubenswrapper[4664]: E1013 07:00:44.397775 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:98a5233f0596591acdf2c6a5838b08be108787cdb6ad1995b2b7886bac0fe6ca\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc"
Oct 13 07:00:44 crc kubenswrapper[4664]: E1013 07:00:44.397931 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:063a7e65b4ba98f0506f269ff7525b446eae06a5ed4a61c18ffa33a886500867\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5"
Oct 13 07:00:44 crc kubenswrapper[4664]: I1013 07:00:44.810977 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podStartSLOduration=6.5402126240000005 podStartE2EDuration="29.810962047s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.717390448 +0000 UTC m=+826.404835640" lastFinishedPulling="2025-10-13 07:00:41.988139871 +0000 UTC m=+849.675585063" observedRunningTime="2025-10-13 07:00:44.613548049 +0000 UTC m=+852.300993251" watchObservedRunningTime="2025-10-13 07:00:44.810962047 +0000 UTC m=+852.498407239"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.043494 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" podStartSLOduration=5.730171088 podStartE2EDuration="29.043474753s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.781248344 +0000 UTC m=+826.468693546" lastFinishedPulling="2025-10-13 07:00:42.094552019 +0000 UTC m=+849.781997211" observedRunningTime="2025-10-13 07:00:44.92022972 +0000 UTC m=+852.607674912" watchObservedRunningTime="2025-10-13 07:00:45.043474753 +0000 UTC m=+852.730919945"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.091788 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" podStartSLOduration=5.798399173 podStartE2EDuration="29.091773393s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.739822814 +0000 UTC m=+826.427268016" lastFinishedPulling="2025-10-13 07:00:42.033197044 +0000 UTC m=+849.720642236" observedRunningTime="2025-10-13 07:00:45.047944682 +0000 UTC m=+852.735389874" watchObservedRunningTime="2025-10-13 07:00:45.091773393 +0000 UTC m=+852.779218595"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.137291 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk" podStartSLOduration=5.974016868 podStartE2EDuration="29.137275057s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.930100358 +0000 UTC m=+826.617545550" lastFinishedPulling="2025-10-13 07:00:42.093358547 +0000 UTC m=+849.780803739" observedRunningTime="2025-10-13 07:00:45.136452076 +0000 UTC m=+852.823897258" watchObservedRunningTime="2025-10-13 07:00:45.137275057 +0000 UTC m=+852.824720249"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.280512 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx" podStartSLOduration=5.980502518 podStartE2EDuration="29.280476329s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.794732392 +0000 UTC m=+826.482177584" lastFinishedPulling="2025-10-13 07:00:42.094706193 +0000 UTC m=+849.782151395" observedRunningTime="2025-10-13 07:00:45.238055526 +0000 UTC m=+852.925500718" watchObservedRunningTime="2025-10-13 07:00:45.280476329 +0000 UTC m=+852.967921521"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.403468 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" event={"ID":"68c3d701-56d2-4bce-8c6e-e1894084fecf","Type":"ContainerStarted","Data":"68c6d875160748b4296e41008539e9e3f9d0ad5ec5789e83d91628341517126d"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.404187 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.409100 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" event={"ID":"de55f4c7-2413-4a00-8691-a9545525fc88","Type":"ContainerStarted","Data":"0c3a33612c5222fca305fff11757f15487d566279a1afa69e3b499d1626fbf37"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.409553 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.421873 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" event={"ID":"deda5c15-ffb4-44e6-9e27-465106737111","Type":"ContainerStarted","Data":"e7344ea199e50d124edc7e9bf6658d541bb7bdb1f93696f17feef3b8bd48f999"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.422161 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.429849 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" event={"ID":"bf55d8b4-9315-48b2-962c-318911833b6f","Type":"ContainerStarted","Data":"66c9af379195a7127b67893813446a82a4c6299827a44803d7ee71014f71241c"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.429922 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.436764 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" event={"ID":"b28d60cb-ff14-4b64-b7b0-3af252c60311","Type":"ContainerStarted","Data":"0e740287be5f15a017e88bf515ee8050171e1a7126170ac9dd60d0cc958770f5"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.440608 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" event={"ID":"3c368f1e-93f0-440a-ad95-d205dd78e4b2","Type":"ContainerStarted","Data":"8a657a969470497cc64c91bcfd5d57c6df4c8abd932a21761d24332e587a1d0f"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.440723 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.443479 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" event={"ID":"5367fac7-2b9f-4745-b3cb-4accdf26ef59","Type":"ContainerStarted","Data":"fbe431ddabdfbb56564a96a88ccf5c0081ddb4bb5b6abc74d14b39ef9d12be1d"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.443868 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.445617 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" event={"ID":"24e25e0a-a138-41fb-b90e-08d800f751b4","Type":"ContainerStarted","Data":"87e9c9a490a8a5eeea4c54412f87e3ef865ef87cb5bc8ada8ec1c6249b22dffd"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.446018 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.447457 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" event={"ID":"9f6b6a3e-5706-49fa-aafa-49f68b19997e","Type":"ContainerStarted","Data":"1002d20b192121f10b82d1de945e024275e49adca570bbe7320badf319869e31"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.447814 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.449081 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" event={"ID":"ccadedcb-9722-4c86-9b22-17d4f9ce1cd7","Type":"ContainerStarted","Data":"57164bb079b45bc0c0387234741bb28ea524d977d8cb37c2a916993c5f78d94e"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.449431 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.451928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" event={"ID":"b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07","Type":"ContainerStarted","Data":"d2d858d4171be100278e28f8c93dad725d6631844103a8d01752a745d82a51a7"}
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.452023 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.523411 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" podStartSLOduration=10.973494428 podStartE2EDuration="30.523387611s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.42306819 +0000 UTC m=+825.110513382" lastFinishedPulling="2025-10-13 07:00:36.972961373 +0000 UTC m=+844.660406565" observedRunningTime="2025-10-13 07:00:45.518719948 +0000 UTC m=+853.206165150" watchObservedRunningTime="2025-10-13 07:00:45.523387611 +0000 UTC m=+853.210832953"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.528303 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" podStartSLOduration=9.537559865 podStartE2EDuration="30.528288571s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.54542395 +0000 UTC m=+825.232869142" lastFinishedPulling="2025-10-13 07:00:38.536152636 +0000 UTC m=+846.223597848" observedRunningTime="2025-10-13 07:00:45.466190887 +0000 UTC m=+853.153636089" watchObservedRunningTime="2025-10-13 07:00:45.528288571 +0000 UTC m=+853.215733763"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.614164 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" podStartSLOduration=9.687157834 podStartE2EDuration="29.614131494s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.633035908 +0000 UTC m=+826.320481100" lastFinishedPulling="2025-10-13 07:00:38.560009568 +0000 UTC m=+846.247454760" observedRunningTime="2025-10-13 07:00:45.609253335 +0000 UTC m=+853.296698527" watchObservedRunningTime="2025-10-13 07:00:45.614131494 +0000 UTC m=+853.301576686"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.615199 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" podStartSLOduration=9.601180339999999 podStartE2EDuration="29.615193933s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.522878642 +0000 UTC m=+826.210323834" lastFinishedPulling="2025-10-13 07:00:38.536892225 +0000 UTC m=+846.224337427" observedRunningTime="2025-10-13 07:00:45.559517808 +0000 UTC m=+853.246963020" watchObservedRunningTime="2025-10-13 07:00:45.615193933 +0000 UTC m=+853.302639125"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.648091 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" podStartSLOduration=8.764621117 podStartE2EDuration="29.648073613s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.653617654 +0000 UTC m=+825.341062836" lastFinishedPulling="2025-10-13 07:00:38.53707014 +0000 UTC m=+846.224515332" observedRunningTime="2025-10-13 07:00:45.648031322 +0000 UTC m=+853.335476534" watchObservedRunningTime="2025-10-13 07:00:45.648073613 +0000 UTC m=+853.335518795"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.746065 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podStartSLOduration=10.121628391 podStartE2EDuration="29.746047317s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.91061044 +0000 UTC m=+826.598055632" lastFinishedPulling="2025-10-13 07:00:38.535029346 +0000 UTC m=+846.222474558" observedRunningTime="2025-10-13 07:00:45.706750807 +0000 UTC m=+853.394196009" watchObservedRunningTime="2025-10-13 07:00:45.746047317 +0000 UTC m=+853.433492499"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.747662 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" podStartSLOduration=10.204966852 podStartE2EDuration="30.74765842s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.962172759 +0000 UTC m=+825.649617951" lastFinishedPulling="2025-10-13 07:00:38.504864327 +0000 UTC m=+846.192309519" observedRunningTime="2025-10-13 07:00:45.745144993 +0000 UTC m=+853.432590195" watchObservedRunningTime="2025-10-13 07:00:45.74765842 +0000 UTC m=+853.435103612"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.766158 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" podStartSLOduration=9.849406309 podStartE2EDuration="30.766140279s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.620774692 +0000 UTC m=+825.308219884" lastFinishedPulling="2025-10-13 07:00:38.537508642 +0000 UTC m=+846.224953854" observedRunningTime="2025-10-13 07:00:45.765221635 +0000 UTC m=+853.452666837" watchObservedRunningTime="2025-10-13 07:00:45.766140279 +0000 UTC m=+853.453585461"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.790512 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" podStartSLOduration=9.876710182 podStartE2EDuration="30.790489284s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.621311266 +0000 UTC m=+825.308756458" lastFinishedPulling="2025-10-13 07:00:38.535090348 +0000 UTC m=+846.222535560" observedRunningTime="2025-10-13 07:00:45.788155302 +0000 UTC m=+853.475600494" watchObservedRunningTime="2025-10-13 07:00:45.790489284 +0000 UTC m=+853.477934476"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.814578 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" podStartSLOduration=9.689223852 podStartE2EDuration="29.814553571s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.434678929 +0000 UTC m=+826.122124121" lastFinishedPulling="2025-10-13 07:00:38.560008648 +0000 UTC m=+846.247453840" observedRunningTime="2025-10-13 07:00:45.812097226 +0000 UTC m=+853.499542428" watchObservedRunningTime="2025-10-13 07:00:45.814553571 +0000 UTC m=+853.501998763"
Oct 13 07:00:45 crc kubenswrapper[4664]: I1013 07:00:45.849910 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" podStartSLOduration=10.738572149 podStartE2EDuration="30.849895767s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.425456834 +0000 UTC m=+826.112902026" lastFinishedPulling="2025-10-13 07:00:38.536780432 +0000 UTC m=+846.224225644" observedRunningTime="2025-10-13 07:00:45.844861734 +0000 UTC m=+853.532306936" watchObservedRunningTime="2025-10-13 07:00:45.849895767 +0000 UTC m=+853.537340959"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.462358 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" event={"ID":"91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026","Type":"ContainerStarted","Data":"f3d6d655792c3c3d23f577d59f72d11f7bcb81beb8270ff66d5e0ec8578b1562"}
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.463313 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.464594 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" event={"ID":"bd3c97ad-17c7-47d7-ae5e-1a67c489c142","Type":"ContainerStarted","Data":"dbdc8ce675e93bd5a8145b066c463d7a8153ebc63b166dc2bf1c75e506741da8"}
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.466174 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.500051 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" podStartSLOduration=3.307207226 podStartE2EDuration="30.500034643s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.731236055 +0000 UTC m=+825.418681247" lastFinishedPulling="2025-10-13 07:00:44.924063472 +0000 UTC m=+852.611508664" observedRunningTime="2025-10-13 07:00:46.494493686 +0000 UTC m=+854.181938878" watchObservedRunningTime="2025-10-13 07:00:46.500034643 +0000 UTC m=+854.187479835"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.518339 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" podStartSLOduration=4.089565892 podStartE2EDuration="31.518323697s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.207394513 +0000 UTC m=+825.894839705" lastFinishedPulling="2025-10-13 07:00:45.636152318 +0000 UTC m=+853.323597510" observedRunningTime="2025-10-13 07:00:46.514631319 +0000 UTC m=+854.202076511" watchObservedRunningTime="2025-10-13 07:00:46.518323697 +0000 UTC m=+854.205768879"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.532228 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms"
Oct 13 07:00:46 crc kubenswrapper[4664]: I1013 07:00:46.930546 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w"
Oct 13 07:00:47 crc kubenswrapper[4664]: I1013 07:00:47.478597 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.177601 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.232903 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.384742 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.435633 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.448131 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.536166 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.536698 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.552434 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" event={"ID":"a76ae989-9e97-43ee-a38f-ebb30be19ab6","Type":"ContainerStarted","Data":"a09b6d8b462bf48e00d4681ee807e40e518b28a4f80841e50be6243d3a1c532d"}
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.552660 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.592677 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.621132 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" podStartSLOduration=2.499778166 podStartE2EDuration="40.621117309s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:17.966501355 +0000 UTC m=+825.653946547" lastFinishedPulling="2025-10-13 07:00:56.087840448 +0000 UTC m=+863.775285690" observedRunningTime="2025-10-13 07:00:56.617252946 +0000 UTC m=+864.304698158" watchObservedRunningTime="2025-10-13 07:00:56.621117309 +0000 UTC m=+864.308562501"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.626323 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.673045 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.674497 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.937537 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.967457 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff"
Oct 13 07:00:56 crc kubenswrapper[4664]: I1013 07:00:56.969988 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-6qvrk"
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.021399 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j"
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.026324 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw"
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.311426 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q"
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.559602 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" event={"ID":"3ae920d7-c605-4984-a072-dad04b3cc6cc","Type":"ContainerStarted","Data":"51a3cfa2b4fcd061d99ba9d22afc0eb1820a568de70df0553753810d522679e1"}
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.560201 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km"
Oct 13 07:00:57 crc kubenswrapper[4664]: I1013 07:00:57.575681 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podStartSLOduration=3.372885184 podStartE2EDuration="41.575659285s" podCreationTimestamp="2025-10-13 07:00:16 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.700302775 +0000 UTC m=+826.387747967" lastFinishedPulling="2025-10-13 07:00:56.903076876 +0000 UTC m=+864.590522068" observedRunningTime="2025-10-13 07:00:57.57549847 +0000 UTC m=+865.262943662" watchObservedRunningTime="2025-10-13 07:00:57.575659285 +0000 UTC m=+865.263104477"
Oct 13 07:00:59 crc kubenswrapper[4664]: I1013 07:00:59.576257 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" event={"ID":"5daf4cb7-d305-4408-97d8-9645cd4e61d5","Type":"ContainerStarted","Data":"3705211f1f626ae85b38363974e2a142d6082c587c544d60650e885425ca658f"}
Oct 13 07:00:59 crc kubenswrapper[4664]: I1013 07:00:59.577471 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn"
Oct 13 07:01:06 crc kubenswrapper[4664]: I1013 07:01:06.475714 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl"
Oct 13 07:01:06 crc kubenswrapper[4664]: I1013 07:01:06.507724 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podStartSLOduration=10.774763939 podStartE2EDuration="51.507695247s" podCreationTimestamp="2025-10-13 07:00:15 +0000 UTC" firstStartedPulling="2025-10-13 07:00:18.440618337 +0000 UTC m=+826.128063529" lastFinishedPulling="2025-10-13 07:00:59.173549615 +0000 UTC m=+866.860994837" observedRunningTime="2025-10-13 07:00:59.608373519 +0000 UTC m=+867.295818711" watchObservedRunningTime="2025-10-13 07:01:06.507695247 +0000 UTC m=+874.195140459"
Oct 13 07:01:06 crc kubenswrapper[4664]: I1013 07:01:06.578814 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn"
Oct 13 07:01:07 crc kubenswrapper[4664]: I1013 07:01:07.031904 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.308300 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"]
Oct 13 07:01:22 crc kubenswrapper[4664]: E1013 07:01:22.309151 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="extract-content"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.309165 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="extract-content"
Oct 13 07:01:22 crc kubenswrapper[4664]: E1013 07:01:22.309190 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="registry-server"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.309196 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="registry-server"
Oct 13 07:01:22 crc kubenswrapper[4664]: E1013 07:01:22.309211 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="extract-utilities"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.309217 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="extract-utilities"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.309366 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="73874794-d495-48e3-9087-8c37dc2dd70c" containerName="registry-server"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.310114 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.313436 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.316606 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.316966 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-c8bgh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.317441 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.322005 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"]
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.381718 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"]
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.382757 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.392775 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.402853 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qq8j\" (UniqueName: \"kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.402941 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.403145 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"]
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.503843 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qq8j\" (UniqueName: \"kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.503887 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.503944 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.503969 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.504008 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql5p6\" (UniqueName: \"kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.504890 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.523416 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qq8j\" (UniqueName: \"kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j\") pod \"dnsmasq-dns-656586ff77-jkszh\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.605313 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.605849 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.605997 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql5p6\" (UniqueName: \"kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.606307 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.606718 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.625885 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql5p6\" (UniqueName: \"kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6\") pod \"dnsmasq-dns-7876f7ff45-xz4jl\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.633321 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-656586ff77-jkszh"
Oct 13 07:01:22 crc kubenswrapper[4664]: I1013 07:01:22.700564 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl"
Oct 13 07:01:23 crc kubenswrapper[4664]: I1013 07:01:23.071094 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"]
Oct 13 07:01:23 crc kubenswrapper[4664]: I1013 07:01:23.077148 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 13 07:01:23 crc kubenswrapper[4664]: W1013 07:01:23.167286 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64150d45_539f_46e7_82ca_aa2e9eec369b.slice/crio-493f464cb87059524e0fda5d005794c64c68721a69363b40682091d468f7bc7d WatchSource:0}: Error finding container 493f464cb87059524e0fda5d005794c64c68721a69363b40682091d468f7bc7d: Status 404 returned error can't find the container with id 493f464cb87059524e0fda5d005794c64c68721a69363b40682091d468f7bc7d
Oct 13 07:01:23 crc kubenswrapper[4664]: I1013 07:01:23.178219 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"]
Oct 13 07:01:23 crc kubenswrapper[4664]: I1013 07:01:23.774874 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl" event={"ID":"64150d45-539f-46e7-82ca-aa2e9eec369b","Type":"ContainerStarted","Data":"493f464cb87059524e0fda5d005794c64c68721a69363b40682091d468f7bc7d"}
Oct 13 07:01:23 crc kubenswrapper[4664]: I1013 07:01:23.776763 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656586ff77-jkszh" event={"ID":"64156830-a987-4481-a1e4-8d6fbe9fc22b","Type":"ContainerStarted","Data":"ae4bb7a5234d55f25493aec876da6a5f9e4027cbd701bb68e07d3217f77f1cf2"}
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.065094 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"]
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.090466 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"]
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.091637 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.114041 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"]
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.246960 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.247010 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmtd5\" (UniqueName: \"kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.247039 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.348007 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.348055 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmtd5\" (UniqueName: \"kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.348082 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.349141 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.352749 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr"
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.407505 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"]
Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.418840 4664
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmtd5\" (UniqueName: \"kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5\") pod \"dnsmasq-dns-7596fbdcc-q4cvr\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.436707 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.444421 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.478555 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.555244 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5xs6\" (UniqueName: \"kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.555325 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.555388 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.656514 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.656602 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.657507 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5xs6\" (UniqueName: \"kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.658028 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " 
pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.658619 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.683029 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5xs6\" (UniqueName: \"kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6\") pod \"dnsmasq-dns-69bb789bb9-h2knf\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.712347 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" Oct 13 07:01:25 crc kubenswrapper[4664]: I1013 07:01:25.775548 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.249579 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.265235 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.271587 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.272617 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.272806 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.273850 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.274000 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xfxcb" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.275764 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.277603 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.299636 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.312686 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:01:26 crc kubenswrapper[4664]: W1013 07:01:26.322401 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b530111_87c3_4bf7_9e59_5a6faffa86c9.slice/crio-99aceecfcbe337cc25976861974d07449991c5b84214d0531aeb85273bd06968 WatchSource:0}: Error finding container 99aceecfcbe337cc25976861974d07449991c5b84214d0531aeb85273bd06968: Status 404 returned error can't find the container with id 99aceecfcbe337cc25976861974d07449991c5b84214d0531aeb85273bd06968 Oct 13 07:01:26 crc 
kubenswrapper[4664]: I1013 07:01:26.381585 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382145 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382179 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382251 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382279 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382314 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382341 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382576 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382672 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.382915 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd6x2\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.383099 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.401496 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"] Oct 13 07:01:26 crc kubenswrapper[4664]: W1013 07:01:26.426834 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff1e67ca_3da1_48cf_b5f4_4bd7f693e78c.slice/crio-2867c7c52f41c87cc601be9d88aff70676444fd19aedfe38053d59c1cf7238f1 WatchSource:0}: Error finding container 2867c7c52f41c87cc601be9d88aff70676444fd19aedfe38053d59c1cf7238f1: Status 404 returned error can't find the container with id 2867c7c52f41c87cc601be9d88aff70676444fd19aedfe38053d59c1cf7238f1 Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485424 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485494 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485531 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485554 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485583 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485600 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " 
pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485625 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485646 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485679 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485708 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.485728 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd6x2\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.487659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.487895 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.488852 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.489360 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.488763 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.492514 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.495555 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.496678 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.500958 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.502156 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.505085 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd6x2\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.520078 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.597696 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.663736 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.665254 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.673578 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.673769 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-j24j9" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.673875 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.676423 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.677615 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.677860 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.678056 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.679119 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790242 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790286 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790324 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf9g2\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790371 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790403 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790443 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790461 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790485 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790572 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790638 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.790723 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.860906 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" event={"ID":"1b530111-87c3-4bf7-9e59-5a6faffa86c9","Type":"ContainerStarted","Data":"99aceecfcbe337cc25976861974d07449991c5b84214d0531aeb85273bd06968"} Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.862767 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" event={"ID":"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c","Type":"ContainerStarted","Data":"2867c7c52f41c87cc601be9d88aff70676444fd19aedfe38053d59c1cf7238f1"} Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.892746 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893253 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893281 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893325 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893346 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893394 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893411 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893473 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893512 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893530 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.893572 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf9g2\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.894902 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.895168 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.900016 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.903455 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.905034 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.905965 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.911633 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.913089 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.914396 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.914775 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.915976 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf9g2\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.933889 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:01:26 crc kubenswrapper[4664]: I1013 07:01:26.954745 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:27 crc kubenswrapper[4664]: I1013 07:01:27.004337 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:01:27 crc kubenswrapper[4664]: I1013 07:01:27.519592 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:01:27 crc kubenswrapper[4664]: I1013 07:01:27.874252 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerStarted","Data":"8777a4a620be9857a21e071cd5c8efbd1fefafb5e0cf37c44cf654df2579f137"} Oct 13 07:01:27 crc kubenswrapper[4664]: I1013 07:01:27.875929 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerStarted","Data":"749a588fdc08c9d7389a9420d82cb807ee46c20a987c6950d7028b88f7b0c32d"} Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.044289 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.046326 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.048491 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.051266 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.051504 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.051615 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-r4psh" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.051902 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.052675 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.055882 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.128474 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.128511 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.130777 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-secrets\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.130888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.130920 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmxcf\" (UniqueName: \"kubernetes.io/projected/cc0510b6-15a8-4d1a-93c3-f92869340539-kube-api-access-dmxcf\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.130944 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " 
pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.130970 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.131002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.131029 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.231854 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.232055 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.232102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-secrets\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.233731 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.233918 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234590 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234614 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-dmxcf\" (UniqueName: \"kubernetes.io/projected/cc0510b6-15a8-4d1a-93c3-f92869340539-kube-api-access-dmxcf\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234631 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234655 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234677 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.234695 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.237047 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.237153 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.237989 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc0510b6-15a8-4d1a-93c3-f92869340539-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.247590 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-secrets\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.258296 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc 
kubenswrapper[4664]: I1013 07:01:28.260825 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc0510b6-15a8-4d1a-93c3-f92869340539-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.266698 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.295604 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmxcf\" (UniqueName: \"kubernetes.io/projected/cc0510b6-15a8-4d1a-93c3-f92869340539-kube-api-access-dmxcf\") pod \"openstack-galera-0\" (UID: \"cc0510b6-15a8-4d1a-93c3-f92869340539\") " pod="openstack/openstack-galera-0" Oct 13 07:01:28 crc kubenswrapper[4664]: I1013 07:01:28.427449 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.010466 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.382162 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.384285 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.389079 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.389231 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.389366 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-l8bgz" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.389490 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.394321 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555877 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555918 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secrets\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555966 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555980 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.555997 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.556025 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x8wx\" (UniqueName: \"kubernetes.io/projected/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kube-api-access-5x8wx\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.556057 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.556078 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659127 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659196 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659228 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secrets\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659261 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659283 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659303 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659343 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x8wx\" (UniqueName: \"kubernetes.io/projected/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kube-api-access-5x8wx\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659376 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.659395 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.660018 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.660944 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.661158 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-generated\") 
pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.661537 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.662382 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.669998 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.675311 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.675487 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.738939 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.739227 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.740209 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.746013 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x8wx\" (UniqueName: \"kubernetes.io/projected/a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931-kube-api-access-5x8wx\") pod \"openstack-cell1-galera-0\" (UID: \"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931\") " pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.751767 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-k8fbc" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.752116 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.752254 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.790343 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.873491 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-config-data\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.873598 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.873644 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.873671 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-kolla-config\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.873704 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzhwl\" (UniqueName: \"kubernetes.io/projected/5f87e03e-f1be-4f12-a267-393fcde6e51e-kube-api-access-bzhwl\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.927889 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerStarted","Data":"158b85ee8e8c71b460b067e8c278b440bc1b54b75e3e4864439fc9c1740e4fb7"} Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.975508 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-kolla-config\") pod 
\"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.975581 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzhwl\" (UniqueName: \"kubernetes.io/projected/5f87e03e-f1be-4f12-a267-393fcde6e51e-kube-api-access-bzhwl\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.975611 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-config-data\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.975910 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.975986 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.976734 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-config-data\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.977019 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5f87e03e-f1be-4f12-a267-393fcde6e51e-kolla-config\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.981758 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.990172 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f87e03e-f1be-4f12-a267-393fcde6e51e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:29 crc kubenswrapper[4664]: I1013 07:01:29.997477 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzhwl\" (UniqueName: \"kubernetes.io/projected/5f87e03e-f1be-4f12-a267-393fcde6e51e-kube-api-access-bzhwl\") pod \"memcached-0\" (UID: \"5f87e03e-f1be-4f12-a267-393fcde6e51e\") " pod="openstack/memcached-0" Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.031519 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.139248 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.542155 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 13 07:01:30 crc kubenswrapper[4664]: W1013 07:01:30.566977 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5d8fbdf_76bc_48d9_83b9_9d3ff6d58931.slice/crio-1c4c44fd8b0626834a989c1c120371f9aeba03b1561507a53d0635d70c668956 WatchSource:0}: Error finding container 1c4c44fd8b0626834a989c1c120371f9aeba03b1561507a53d0635d70c668956: Status 404 returned error can't find the container with id 1c4c44fd8b0626834a989c1c120371f9aeba03b1561507a53d0635d70c668956 Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.749431 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.943723 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerStarted","Data":"1c4c44fd8b0626834a989c1c120371f9aeba03b1561507a53d0635d70c668956"} Oct 13 07:01:30 crc kubenswrapper[4664]: I1013 07:01:30.945730 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"5f87e03e-f1be-4f12-a267-393fcde6e51e","Type":"ContainerStarted","Data":"c9668944e7f9aaaaa01ad1061af63e1c521c75b20f5196798381eb94c7518e54"} Oct 13 07:01:31 crc kubenswrapper[4664]: I1013 07:01:31.747617 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:01:31 crc kubenswrapper[4664]: I1013 07:01:31.748771 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:01:31 crc kubenswrapper[4664]: I1013 07:01:31.750949 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-2hztt" Oct 13 07:01:31 crc kubenswrapper[4664]: I1013 07:01:31.773374 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:01:31 crc kubenswrapper[4664]: I1013 07:01:31.922248 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztwcm\" (UniqueName: \"kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm\") pod \"kube-state-metrics-0\" (UID: \"21624285-c826-40e3-8963-a2ac3cf7efd8\") " pod="openstack/kube-state-metrics-0" Oct 13 07:01:32 crc kubenswrapper[4664]: I1013 07:01:32.024254 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztwcm\" (UniqueName: \"kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm\") pod \"kube-state-metrics-0\" (UID: \"21624285-c826-40e3-8963-a2ac3cf7efd8\") " pod="openstack/kube-state-metrics-0" Oct 13 07:01:32 crc kubenswrapper[4664]: I1013 07:01:32.048260 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztwcm\" (UniqueName: \"kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm\") pod \"kube-state-metrics-0\" (UID: \"21624285-c826-40e3-8963-a2ac3cf7efd8\") " pod="openstack/kube-state-metrics-0" Oct 13 07:01:32 crc kubenswrapper[4664]: I1013 07:01:32.103005 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:01:32 crc kubenswrapper[4664]: I1013 07:01:32.628558 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:01:32 crc kubenswrapper[4664]: I1013 07:01:32.994693 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21624285-c826-40e3-8963-a2ac3cf7efd8","Type":"ContainerStarted","Data":"7932489b6a6772bc6d5e24a9626949ae4fb10def5a473f1f023fe8e2ec1116db"} Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.803183 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-47b95"] Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.804391 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.813331 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.813355 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-h2h24" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.813447 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817093 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-combined-ca-bundle\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817180 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-scripts\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817398 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817584 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbfbr\" (UniqueName: \"kubernetes.io/projected/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-kube-api-access-wbfbr\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817668 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817736 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-ovn-controller-tls-certs\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.817754 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-log-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.822083 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95"] Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.854866 4664 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-frzn7"] Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.864192 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.884544 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-frzn7"] Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919155 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-etc-ovs\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919208 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-log\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919227 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-scripts\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919246 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-lib\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919271 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-combined-ca-bundle\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919297 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-scripts\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919335 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-run\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919351 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn6f7\" (UniqueName: \"kubernetes.io/projected/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-kube-api-access-rn6f7\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:34 crc kubenswrapper[4664]: 
I1013 07:01:34.919369 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.919390 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbfbr\" (UniqueName: \"kubernetes.io/projected/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-kube-api-access-wbfbr\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.920026 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.921516 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-scripts\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.921588 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.921722 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-run\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.921774 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-ovn-controller-tls-certs\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.921863 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-log-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.922017 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-var-log-ovn\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.941131 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-ovn-controller-tls-certs\") pod \"ovn-controller-47b95\" (UID: 
\"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.942366 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-combined-ca-bundle\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:34 crc kubenswrapper[4664]: I1013 07:01:34.950109 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbfbr\" (UniqueName: \"kubernetes.io/projected/7c8ef9b2-22fd-4d00-b710-8e22b4fefecf-kube-api-access-wbfbr\") pod \"ovn-controller-47b95\" (UID: \"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf\") " pod="openstack/ovn-controller-47b95" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.023686 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-etc-ovs\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.023740 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-log\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.023754 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-scripts\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.023776 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-lib\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024153 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-lib\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024236 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-etc-ovs\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024246 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-log\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024344 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-run\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024368 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn6f7\" (UniqueName: \"kubernetes.io/projected/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-kube-api-access-rn6f7\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.024464 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-var-run\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.025784 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-scripts\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.044664 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn6f7\" (UniqueName: \"kubernetes.io/projected/e96bba9a-6e2d-49e9-9543-a58d6c5de1fb-kube-api-access-rn6f7\") pod \"ovn-controller-ovs-frzn7\" (UID: \"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb\") " pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.164677 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.201911 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.307133 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.312780 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.315614 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.316424 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.316588 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.317993 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-c4wmj" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.320399 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.331477 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhmsm\" (UniqueName: \"kubernetes.io/projected/9294acab-d17a-4e3b-bf85-19429846ca0c-kube-api-access-qhmsm\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.331536 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.331583 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.331615 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-config\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.337087 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.337133 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.337158 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: 
\"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.337226 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.350703 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444531 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-config\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444588 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444615 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444643 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444689 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444721 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhmsm\" (UniqueName: \"kubernetes.io/projected/9294acab-d17a-4e3b-bf85-19429846ca0c-kube-api-access-qhmsm\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444743 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.444774 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc 
kubenswrapper[4664]: I1013 07:01:35.445502 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.445574 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.445895 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-config\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.446301 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9294acab-d17a-4e3b-bf85-19429846ca0c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.468433 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.470714 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.472447 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhmsm\" (UniqueName: \"kubernetes.io/projected/9294acab-d17a-4e3b-bf85-19429846ca0c-kube-api-access-qhmsm\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.473667 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9294acab-d17a-4e3b-bf85-19429846ca0c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.494463 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9294acab-d17a-4e3b-bf85-19429846ca0c\") " pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:35 crc kubenswrapper[4664]: I1013 07:01:35.632143 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 13 07:01:36 crc kubenswrapper[4664]: I1013 07:01:36.076086 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95"] Oct 13 07:01:36 crc kubenswrapper[4664]: I1013 07:01:36.714029 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-frzn7"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.041731 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95" event={"ID":"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf","Type":"ContainerStarted","Data":"87da31112940e6d236880cae2ead3047cc10cce304817bcada754c54afc7b7bc"} Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.043853 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frzn7" event={"ID":"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb","Type":"ContainerStarted","Data":"0ff7514755d6460b17c23b7d7d6d551b87fdf04f2d3d5423fdc17ba39a39a87e"} Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.398000 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 13 07:01:37 crc kubenswrapper[4664]: W1013 07:01:37.426373 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9294acab_d17a_4e3b_bf85_19429846ca0c.slice/crio-1065852a9b21a1c56c3fe12a1b16f435ea5ce7b58985077311eaf9d62af94c89 WatchSource:0}: Error finding container 1065852a9b21a1c56c3fe12a1b16f435ea5ce7b58985077311eaf9d62af94c89: Status 404 returned error can't find the container with id 1065852a9b21a1c56c3fe12a1b16f435ea5ce7b58985077311eaf9d62af94c89 Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.681747 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-jdzlh"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.684080 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.687210 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798000 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovs-rundir\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798102 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849abee9-7bc4-4b49-82d3-a98fd5f192f4-config\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798125 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798146 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovn-rundir\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798162 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xftqn\" (UniqueName: \"kubernetes.io/projected/849abee9-7bc4-4b49-82d3-a98fd5f192f4-kube-api-access-xftqn\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.798222 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-combined-ca-bundle\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.802647 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jdzlh"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.869852 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.887628 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.888914 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.893131 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899334 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849abee9-7bc4-4b49-82d3-a98fd5f192f4-config\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899379 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899404 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovn-rundir\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899422 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xftqn\" (UniqueName: \"kubernetes.io/projected/849abee9-7bc4-4b49-82d3-a98fd5f192f4-kube-api-access-xftqn\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899504 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-combined-ca-bundle\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899560 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovs-rundir\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.899934 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovs-rundir\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.900069 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849abee9-7bc4-4b49-82d3-a98fd5f192f4-config\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.900383 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/849abee9-7bc4-4b49-82d3-a98fd5f192f4-ovn-rundir\") pod 
\"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.906976 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.910030 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849abee9-7bc4-4b49-82d3-a98fd5f192f4-combined-ca-bundle\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.912103 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:01:37 crc kubenswrapper[4664]: I1013 07:01:37.926722 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xftqn\" (UniqueName: \"kubernetes.io/projected/849abee9-7bc4-4b49-82d3-a98fd5f192f4-kube-api-access-xftqn\") pod \"ovn-controller-metrics-jdzlh\" (UID: \"849abee9-7bc4-4b49-82d3-a98fd5f192f4\") " pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.001834 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.001905 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.001940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96clr\" (UniqueName: \"kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.001962 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.009686 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-jdzlh" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.064976 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21624285-c826-40e3-8963-a2ac3cf7efd8","Type":"ContainerStarted","Data":"8cbc1f6b16fdf110ead5e896ff6d02f58da39021ae4f918cb9c29f39225ea7dd"} Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.065849 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.077023 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9294acab-d17a-4e3b-bf85-19429846ca0c","Type":"ContainerStarted","Data":"1065852a9b21a1c56c3fe12a1b16f435ea5ce7b58985077311eaf9d62af94c89"} Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.082040 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.853665103 podStartE2EDuration="7.082025966s" podCreationTimestamp="2025-10-13 07:01:31 +0000 UTC" firstStartedPulling="2025-10-13 07:01:32.673037851 +0000 UTC m=+900.360483043" lastFinishedPulling="2025-10-13 07:01:36.901398714 +0000 UTC m=+904.588843906" observedRunningTime="2025-10-13 07:01:38.077812284 +0000 UTC m=+905.765257486" watchObservedRunningTime="2025-10-13 07:01:38.082025966 +0000 UTC m=+905.769471148" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.103886 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96clr\" (UniqueName: \"kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.103935 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.104987 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.105042 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.105219 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.105825 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.106360 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.123308 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96clr\" (UniqueName: \"kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr\") pod \"dnsmasq-dns-7494c5547c-rgdd7\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.278784 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.628483 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jdzlh"] Oct 13 07:01:38 crc kubenswrapper[4664]: I1013 07:01:38.895224 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:01:38 crc kubenswrapper[4664]: W1013 07:01:38.902511 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e5e3189_3355_4f5e_a87e_d1ee7a77d221.slice/crio-7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d WatchSource:0}: Error finding container 7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d: Status 404 returned error can't find the container with id 7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.104498 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" event={"ID":"2e5e3189-3355-4f5e-a87e-d1ee7a77d221","Type":"ContainerStarted","Data":"7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d"} Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.108058 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jdzlh" event={"ID":"849abee9-7bc4-4b49-82d3-a98fd5f192f4","Type":"ContainerStarted","Data":"e4d9f36c3ddcb495957f0c7d1b31a7512139fddc678fec3d5cf8fe7f8245b2a3"} Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.228993 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.231560 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.237235 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.237593 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.237948 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.238208 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-2prnj" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.253214 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.325787 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.325872 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.325934 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.325958 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.326017 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.326054 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg9r6\" (UniqueName: \"kubernetes.io/projected/4431fc75-3beb-408f-8981-ef409291bd2d-kube-api-access-jg9r6\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.326155 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " 
pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.326199 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427423 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg9r6\" (UniqueName: \"kubernetes.io/projected/4431fc75-3beb-408f-8981-ef409291bd2d-kube-api-access-jg9r6\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427496 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427545 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427588 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427625 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427677 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427699 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.427753 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.428366 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.433889 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.434991 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.436557 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.437310 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4431fc75-3beb-408f-8981-ef409291bd2d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.438910 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.442468 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4431fc75-3beb-408f-8981-ef409291bd2d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.443341 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg9r6\" (UniqueName: \"kubernetes.io/projected/4431fc75-3beb-408f-8981-ef409291bd2d-kube-api-access-jg9r6\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.461494 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4431fc75-3beb-408f-8981-ef409291bd2d\") " pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:39 crc kubenswrapper[4664]: I1013 07:01:39.562564 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 13 07:01:40 crc kubenswrapper[4664]: I1013 07:01:40.347211 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 13 07:01:41 crc kubenswrapper[4664]: I1013 07:01:41.125103 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4431fc75-3beb-408f-8981-ef409291bd2d","Type":"ContainerStarted","Data":"12c2bc7d48bac03531b4e3b5fe57f06f98ec4f25c01fce19c8a60ce360795e55"} Oct 13 07:01:42 crc kubenswrapper[4664]: I1013 07:01:42.111974 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.175963 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jdzlh" event={"ID":"849abee9-7bc4-4b49-82d3-a98fd5f192f4","Type":"ContainerStarted","Data":"5e9a4e5da0fb77fc84dfa3cf527cf8df07d6dcccd679d18d1ee6842e55b33633"} Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.209952 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-jdzlh" podStartSLOduration=2.463771873 podStartE2EDuration="7.209921307s" podCreationTimestamp="2025-10-13 07:01:37 +0000 UTC" firstStartedPulling="2025-10-13 07:01:38.64073285 +0000 UTC m=+906.328178042" lastFinishedPulling="2025-10-13 07:01:43.386882274 +0000 UTC m=+911.074327476" observedRunningTime="2025-10-13 07:01:44.194710784 +0000 UTC m=+911.882155986" watchObservedRunningTime="2025-10-13 07:01:44.209921307 +0000 UTC m=+911.897366499" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.711256 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"] Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.729516 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.737435 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.753168 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.761699 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.858986 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.859113 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.859146 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.859233 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vr87\" (UniqueName: \"kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.859307 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.960885 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.961027 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.961065 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " 
pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.961094 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.961136 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vr87\" (UniqueName: \"kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.962018 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.962029 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.962057 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.962795 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:44 crc kubenswrapper[4664]: I1013 07:01:44.982005 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vr87\" (UniqueName: \"kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87\") pod \"dnsmasq-dns-559448fdbc-fw7cx\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:45 crc kubenswrapper[4664]: I1013 07:01:45.093221 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:01:45 crc kubenswrapper[4664]: I1013 07:01:45.583411 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:01:45 crc kubenswrapper[4664]: W1013 07:01:45.598880 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1c77c7b_1798_49db_b7f4_8addf6719a12.slice/crio-fc243d40c4ac6c854f9563b6c2ba52a1f9fe904d347a9e30d0c1ed6a26eb552c WatchSource:0}: Error finding container fc243d40c4ac6c854f9563b6c2ba52a1f9fe904d347a9e30d0c1ed6a26eb552c: Status 404 returned error can't find the container with id fc243d40c4ac6c854f9563b6c2ba52a1f9fe904d347a9e30d0c1ed6a26eb552c Oct 13 07:01:46 crc kubenswrapper[4664]: I1013 07:01:46.193923 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerStarted","Data":"fc243d40c4ac6c854f9563b6c2ba52a1f9fe904d347a9e30d0c1ed6a26eb552c"} Oct 13 07:02:09 crc kubenswrapper[4664]: E1013 07:02:09.530554 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:09 crc kubenswrapper[4664]: E1013 07:02:09.531183 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:09 crc kubenswrapper[4664]: E1013 07:02:09.531359 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5x8wx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:02:09 crc kubenswrapper[4664]: E1013 07:02:09.532601 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.307133 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-memcached:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.308394 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-memcached:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.308940 4664 kuberuntime_manager.go:1274] "Unhandled Error" 
err="container &Container{Name:memcached,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-memcached:92672cd85fd36317d65faa0525acf849,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n584h64chbbh584h569h5d9h7dhc5hf8h696h568hf4h9h57h675h78h5chc7h5bbh565h77h68ch568h77h668h674h58dh58dh5c9h694h5bfhbbq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bzhwl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(5f87e03e-f1be-4f12-a267-393fcde6e51e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.310602 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: 
context canceled\"" pod="openstack/memcached-0" podUID="5f87e03e-f1be-4f12-a267-393fcde6e51e" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.397439 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-memcached:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/memcached-0" podUID="5f87e03e-f1be-4f12-a267-393fcde6e51e" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.397555 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.582020 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.582062 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.582164 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dmxcf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(cc0510b6-15a8-4d1a-93c3-f92869340539): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.583785 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.864906 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-sb-db-server:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.864959 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-sb-db-server:92672cd85fd36317d65faa0525acf849" Oct 13 07:02:10 crc kubenswrapper[4664]: E1013 07:02:10.865093 4664 kuberuntime_manager.go:1274] "Unhandled Error" 
err="container &Container{Name:ovsdbserver-sb,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-sb-db-server:92672cd85fd36317d65faa0525acf849,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5bfh5fh7bh677hd5h65bh5f9h569h5bch75h68fh558h694h695h59chf6h6bh59dh584h8chb4h685h5f6h5bch86h66ch566h5f6hf9hd6h79h675q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-sb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jg9r6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod ovsdbserver-sb-0_openstack(4431fc75-3beb-408f-8981-ef409291bd2d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:02:11 crc kubenswrapper[4664]: E1013 07:02:11.418189 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" Oct 13 07:02:11 crc kubenswrapper[4664]: E1013 07:02:11.582868 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="4431fc75-3beb-408f-8981-ef409291bd2d" Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.414385 4664 generic.go:334] "Generic (PLEG): container finished" podID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerID="5a8965dffea5ccc8ebe9134d099376d1687efce75e9bcf15c5cf4690c48bce8a" exitCode=0 Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.414446 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" event={"ID":"2e5e3189-3355-4f5e-a87e-d1ee7a77d221","Type":"ContainerDied","Data":"5a8965dffea5ccc8ebe9134d099376d1687efce75e9bcf15c5cf4690c48bce8a"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.418937 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerStarted","Data":"c3cb732b8da4a7b3ba19ab0789018601cc1a905a4875ff97ae09bd669f144151"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.421228 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9294acab-d17a-4e3b-bf85-19429846ca0c","Type":"ContainerStarted","Data":"7bff7b422a80ab98b98edb29f5d0b205afaa2a810f56f4d231008ebd61e28b1a"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.422700 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95" event={"ID":"7c8ef9b2-22fd-4d00-b710-8e22b4fefecf","Type":"ContainerStarted","Data":"e4ddd90e9adf5f16e3b8c01b54d5207c920167ff4034dceed7383722c94b2695"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.422938 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-47b95" Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.424753 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4431fc75-3beb-408f-8981-ef409291bd2d","Type":"ContainerStarted","Data":"f3909c47f9fbd0a11387d8a72d2df03538c7417bae35aae33f698569004e935c"} Oct 13 07:02:12 crc kubenswrapper[4664]: E1013 07:02:12.425554 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-sb-db-server:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="4431fc75-3beb-408f-8981-ef409291bd2d" Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.426072 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frzn7" 
event={"ID":"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb","Type":"ContainerStarted","Data":"86406a6230c6fa93d1a2c37645f484be3b35b10ceaeb6a280212109bc46e5873"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.427123 4664 generic.go:334] "Generic (PLEG): container finished" podID="1b530111-87c3-4bf7-9e59-5a6faffa86c9" containerID="8f663047f63b69036ff106b97236d28abc0083b321b6300b02409e1f87d06b56" exitCode=0 Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.427169 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" event={"ID":"1b530111-87c3-4bf7-9e59-5a6faffa86c9","Type":"ContainerDied","Data":"8f663047f63b69036ff106b97236d28abc0083b321b6300b02409e1f87d06b56"} Oct 13 07:02:12 crc kubenswrapper[4664]: I1013 07:02:12.621146 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-47b95" podStartSLOduration=4.178747027 podStartE2EDuration="38.621126119s" podCreationTimestamp="2025-10-13 07:01:34 +0000 UTC" firstStartedPulling="2025-10-13 07:01:36.877852321 +0000 UTC m=+904.565297513" lastFinishedPulling="2025-10-13 07:02:11.320231413 +0000 UTC m=+939.007676605" observedRunningTime="2025-10-13 07:02:12.534220068 +0000 UTC m=+940.221665270" watchObservedRunningTime="2025-10-13 07:02:12.621126119 +0000 UTC m=+940.308571301" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.033181 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.202333 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc\") pod \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.202416 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config\") pod \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.202492 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5xs6\" (UniqueName: \"kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6\") pod \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\" (UID: \"1b530111-87c3-4bf7-9e59-5a6faffa86c9\") " Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.226971 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6" (OuterVolumeSpecName: "kube-api-access-m5xs6") pod "1b530111-87c3-4bf7-9e59-5a6faffa86c9" (UID: "1b530111-87c3-4bf7-9e59-5a6faffa86c9"). InnerVolumeSpecName "kube-api-access-m5xs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.229708 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config" (OuterVolumeSpecName: "config") pod "1b530111-87c3-4bf7-9e59-5a6faffa86c9" (UID: "1b530111-87c3-4bf7-9e59-5a6faffa86c9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.250375 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1b530111-87c3-4bf7-9e59-5a6faffa86c9" (UID: "1b530111-87c3-4bf7-9e59-5a6faffa86c9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.304863 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.304899 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b530111-87c3-4bf7-9e59-5a6faffa86c9-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.304914 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5xs6\" (UniqueName: \"kubernetes.io/projected/1b530111-87c3-4bf7-9e59-5a6faffa86c9-kube-api-access-m5xs6\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.440209 4664 generic.go:334] "Generic (PLEG): container finished" podID="64150d45-539f-46e7-82ca-aa2e9eec369b" containerID="c0c2f8e8295f1ecf711b1f2242739f98d64aef1c5a724bb5d3df913994f9ca86" exitCode=0 Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.440292 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl" event={"ID":"64150d45-539f-46e7-82ca-aa2e9eec369b","Type":"ContainerDied","Data":"c0c2f8e8295f1ecf711b1f2242739f98d64aef1c5a724bb5d3df913994f9ca86"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.443324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" event={"ID":"2e5e3189-3355-4f5e-a87e-d1ee7a77d221","Type":"ContainerStarted","Data":"523a636f101b652576daab8c768617c1f39e3f5eb0cf5b8a26d8f4174b0873e5"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.443743 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.445832 4664 generic.go:334] "Generic (PLEG): container finished" podID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerID="c3cb732b8da4a7b3ba19ab0789018601cc1a905a4875ff97ae09bd669f144151" exitCode=0 Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.445916 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerDied","Data":"c3cb732b8da4a7b3ba19ab0789018601cc1a905a4875ff97ae09bd669f144151"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.454517 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9294acab-d17a-4e3b-bf85-19429846ca0c","Type":"ContainerStarted","Data":"c81ff6ec3d615e5228a5868b03934d84875152058766143ac1276a504ae5347a"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.462634 4664 generic.go:334] "Generic (PLEG): container finished" podID="64156830-a987-4481-a1e4-8d6fbe9fc22b" containerID="66a806463c65e23dc1a4812cb513bc9b310b39ada5cf0c5bdc086317364aeeda" exitCode=0 Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.462736 4664 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-656586ff77-jkszh" event={"ID":"64156830-a987-4481-a1e4-8d6fbe9fc22b","Type":"ContainerDied","Data":"66a806463c65e23dc1a4812cb513bc9b310b39ada5cf0c5bdc086317364aeeda"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.465460 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerStarted","Data":"fa4dc12ff6be47dd21c58800f264181d51d1d13bcb68c135a914dab962750129"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.480786 4664 generic.go:334] "Generic (PLEG): container finished" podID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerID="fa7d361e8e254f4c25b12d88e3007908962e6c84148889bdf7de7c6ef09753c1" exitCode=0 Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.481154 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" event={"ID":"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c","Type":"ContainerDied","Data":"fa7d361e8e254f4c25b12d88e3007908962e6c84148889bdf7de7c6ef09753c1"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.488043 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerStarted","Data":"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.491631 4664 generic.go:334] "Generic (PLEG): container finished" podID="e96bba9a-6e2d-49e9-9543-a58d6c5de1fb" containerID="86406a6230c6fa93d1a2c37645f484be3b35b10ceaeb6a280212109bc46e5873" exitCode=0 Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.491714 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frzn7" event={"ID":"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb","Type":"ContainerDied","Data":"86406a6230c6fa93d1a2c37645f484be3b35b10ceaeb6a280212109bc46e5873"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.502297 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=5.721231821 podStartE2EDuration="39.502274692s" podCreationTimestamp="2025-10-13 07:01:34 +0000 UTC" firstStartedPulling="2025-10-13 07:01:37.429539719 +0000 UTC m=+905.116984961" lastFinishedPulling="2025-10-13 07:02:11.21058264 +0000 UTC m=+938.898027832" observedRunningTime="2025-10-13 07:02:13.501717706 +0000 UTC m=+941.189162918" watchObservedRunningTime="2025-10-13 07:02:13.502274692 +0000 UTC m=+941.189719924" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.514825 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.514870 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69bb789bb9-h2knf" event={"ID":"1b530111-87c3-4bf7-9e59-5a6faffa86c9","Type":"ContainerDied","Data":"99aceecfcbe337cc25976861974d07449991c5b84214d0531aeb85273bd06968"} Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.514909 4664 scope.go:117] "RemoveContainer" containerID="8f663047f63b69036ff106b97236d28abc0083b321b6300b02409e1f87d06b56" Oct 13 07:02:13 crc kubenswrapper[4664]: E1013 07:02:13.518306 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-sb-db-server:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="4431fc75-3beb-408f-8981-ef409291bd2d" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.555861 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" podStartSLOduration=4.250395661 podStartE2EDuration="36.55584599s" podCreationTimestamp="2025-10-13 07:01:37 +0000 UTC" firstStartedPulling="2025-10-13 07:01:38.905139461 +0000 UTC m=+906.592584653" lastFinishedPulling="2025-10-13 07:02:11.21058979 +0000 UTC m=+938.898034982" observedRunningTime="2025-10-13 07:02:13.552573244 +0000 UTC m=+941.240018436" watchObservedRunningTime="2025-10-13 07:02:13.55584599 +0000 UTC m=+941.243291182" Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.867862 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.882609 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69bb789bb9-h2knf"] Oct 13 07:02:13 crc kubenswrapper[4664]: I1013 07:02:13.985336 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-656586ff77-jkszh" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.117429 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qq8j\" (UniqueName: \"kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j\") pod \"64156830-a987-4481-a1e4-8d6fbe9fc22b\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.117572 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config\") pod \"64156830-a987-4481-a1e4-8d6fbe9fc22b\" (UID: \"64156830-a987-4481-a1e4-8d6fbe9fc22b\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.126734 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j" (OuterVolumeSpecName: "kube-api-access-8qq8j") pod "64156830-a987-4481-a1e4-8d6fbe9fc22b" (UID: "64156830-a987-4481-a1e4-8d6fbe9fc22b"). InnerVolumeSpecName "kube-api-access-8qq8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.138648 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config" (OuterVolumeSpecName: "config") pod "64156830-a987-4481-a1e4-8d6fbe9fc22b" (UID: "64156830-a987-4481-a1e4-8d6fbe9fc22b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.171641 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.176077 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.219706 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qq8j\" (UniqueName: \"kubernetes.io/projected/64156830-a987-4481-a1e4-8d6fbe9fc22b-kube-api-access-8qq8j\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.219739 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64156830-a987-4481-a1e4-8d6fbe9fc22b-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320450 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmtd5\" (UniqueName: \"kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5\") pod \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320501 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config\") pod \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320602 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc\") pod \"64150d45-539f-46e7-82ca-aa2e9eec369b\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320657 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config\") pod \"64150d45-539f-46e7-82ca-aa2e9eec369b\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320691 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql5p6\" (UniqueName: \"kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6\") pod \"64150d45-539f-46e7-82ca-aa2e9eec369b\" (UID: \"64150d45-539f-46e7-82ca-aa2e9eec369b\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.320744 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc\") pod \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\" (UID: \"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c\") " Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.327907 4664 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6" (OuterVolumeSpecName: "kube-api-access-ql5p6") pod "64150d45-539f-46e7-82ca-aa2e9eec369b" (UID: "64150d45-539f-46e7-82ca-aa2e9eec369b"). InnerVolumeSpecName "kube-api-access-ql5p6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.328036 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5" (OuterVolumeSpecName: "kube-api-access-mmtd5") pod "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" (UID: "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c"). InnerVolumeSpecName "kube-api-access-mmtd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.339132 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" (UID: "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.339419 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config" (OuterVolumeSpecName: "config") pod "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" (UID: "ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.353060 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "64150d45-539f-46e7-82ca-aa2e9eec369b" (UID: "64150d45-539f-46e7-82ca-aa2e9eec369b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.353142 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config" (OuterVolumeSpecName: "config") pod "64150d45-539f-46e7-82ca-aa2e9eec369b" (UID: "64150d45-539f-46e7-82ca-aa2e9eec369b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422271 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422299 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64150d45-539f-46e7-82ca-aa2e9eec369b-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422309 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql5p6\" (UniqueName: \"kubernetes.io/projected/64150d45-539f-46e7-82ca-aa2e9eec369b-kube-api-access-ql5p6\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422320 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422329 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmtd5\" (UniqueName: \"kubernetes.io/projected/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-kube-api-access-mmtd5\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.422336 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.524624 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerStarted","Data":"41caa6e75524cbc0905f5b391d024e9defff1bb21963f4d9ecd98cd0f0df1809"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.524817 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.526189 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" event={"ID":"ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c","Type":"ContainerDied","Data":"2867c7c52f41c87cc601be9d88aff70676444fd19aedfe38053d59c1cf7238f1"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.526226 4664 scope.go:117] "RemoveContainer" containerID="fa7d361e8e254f4c25b12d88e3007908962e6c84148889bdf7de7c6ef09753c1" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.526446 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7596fbdcc-q4cvr" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.527399 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-656586ff77-jkszh" event={"ID":"64156830-a987-4481-a1e4-8d6fbe9fc22b","Type":"ContainerDied","Data":"ae4bb7a5234d55f25493aec876da6a5f9e4027cbd701bb68e07d3217f77f1cf2"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.527445 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-656586ff77-jkszh" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.530399 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frzn7" event={"ID":"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb","Type":"ContainerStarted","Data":"3569028973a460ae902daa53dcfc56b58d8e499bfdf23938c7272ff30fac2fff"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.530426 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-frzn7" event={"ID":"e96bba9a-6e2d-49e9-9543-a58d6c5de1fb","Type":"ContainerStarted","Data":"d825009f6d0c4fd92ed4c9eaa2ce7dcecf9557c0425643ea3605283d44932798"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.530686 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.530853 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.533878 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl" event={"ID":"64150d45-539f-46e7-82ca-aa2e9eec369b","Type":"ContainerDied","Data":"493f464cb87059524e0fda5d005794c64c68721a69363b40682091d468f7bc7d"} Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.533944 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7876f7ff45-xz4jl" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.555681 4664 scope.go:117] "RemoveContainer" containerID="66a806463c65e23dc1a4812cb513bc9b310b39ada5cf0c5bdc086317364aeeda" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.591742 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-frzn7" podStartSLOduration=6.361132635 podStartE2EDuration="40.591721919s" podCreationTimestamp="2025-10-13 07:01:34 +0000 UTC" firstStartedPulling="2025-10-13 07:01:36.876480254 +0000 UTC m=+904.563925446" lastFinishedPulling="2025-10-13 07:02:11.107069538 +0000 UTC m=+938.794514730" observedRunningTime="2025-10-13 07:02:14.585928626 +0000 UTC m=+942.273373848" watchObservedRunningTime="2025-10-13 07:02:14.591721919 +0000 UTC m=+942.279167111" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.594372 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" podStartSLOduration=4.98780036 podStartE2EDuration="30.594359499s" podCreationTimestamp="2025-10-13 07:01:44 +0000 UTC" firstStartedPulling="2025-10-13 07:01:45.609578558 +0000 UTC m=+913.297023750" lastFinishedPulling="2025-10-13 07:02:11.216137687 +0000 UTC m=+938.903582889" observedRunningTime="2025-10-13 07:02:14.561013776 +0000 UTC m=+942.248458968" watchObservedRunningTime="2025-10-13 07:02:14.594359499 +0000 UTC m=+942.281804691" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.598745 4664 scope.go:117] "RemoveContainer" containerID="c0c2f8e8295f1ecf711b1f2242739f98d64aef1c5a724bb5d3df913994f9ca86" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.633194 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.645249 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"] Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.658886 4664 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-656586ff77-jkszh"] Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.670662 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"] Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.685069 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7596fbdcc-q4cvr"] Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.711977 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"] Oct 13 07:02:14 crc kubenswrapper[4664]: I1013 07:02:14.717434 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7876f7ff45-xz4jl"] Oct 13 07:02:15 crc kubenswrapper[4664]: I1013 07:02:15.056696 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b530111-87c3-4bf7-9e59-5a6faffa86c9" path="/var/lib/kubelet/pods/1b530111-87c3-4bf7-9e59-5a6faffa86c9/volumes" Oct 13 07:02:15 crc kubenswrapper[4664]: I1013 07:02:15.057940 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64150d45-539f-46e7-82ca-aa2e9eec369b" path="/var/lib/kubelet/pods/64150d45-539f-46e7-82ca-aa2e9eec369b/volumes" Oct 13 07:02:15 crc kubenswrapper[4664]: I1013 07:02:15.058602 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64156830-a987-4481-a1e4-8d6fbe9fc22b" path="/var/lib/kubelet/pods/64156830-a987-4481-a1e4-8d6fbe9fc22b/volumes" Oct 13 07:02:15 crc kubenswrapper[4664]: I1013 07:02:15.059178 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" path="/var/lib/kubelet/pods/ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c/volumes" Oct 13 07:02:15 crc kubenswrapper[4664]: I1013 07:02:15.632957 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Oct 13 07:02:17 crc kubenswrapper[4664]: I1013 07:02:17.699462 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Oct 13 07:02:17 crc kubenswrapper[4664]: I1013 07:02:17.766918 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Oct 13 07:02:18 crc kubenswrapper[4664]: I1013 07:02:18.282441 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.095300 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.155082 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.155330 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns" containerID="cri-o://523a636f101b652576daab8c768617c1f39e3f5eb0cf5b8a26d8f4174b0873e5" gracePeriod=10 Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.590609 4664 generic.go:334] "Generic (PLEG): container finished" podID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerID="523a636f101b652576daab8c768617c1f39e3f5eb0cf5b8a26d8f4174b0873e5" exitCode=0 Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.590652 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" event={"ID":"2e5e3189-3355-4f5e-a87e-d1ee7a77d221","Type":"ContainerDied","Data":"523a636f101b652576daab8c768617c1f39e3f5eb0cf5b8a26d8f4174b0873e5"} Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.590681 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" event={"ID":"2e5e3189-3355-4f5e-a87e-d1ee7a77d221","Type":"ContainerDied","Data":"7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d"} Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.590696 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b178b5b65e22aaf898f0969ad88e3eb60a732c849ba37688c7a2c97225de76d" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.642520 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.775331 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config\") pod \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.777258 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb\") pod \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.777497 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc\") pod \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.777566 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96clr\" (UniqueName: \"kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr\") pod \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\" (UID: \"2e5e3189-3355-4f5e-a87e-d1ee7a77d221\") " Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.782721 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr" (OuterVolumeSpecName: "kube-api-access-96clr") pod "2e5e3189-3355-4f5e-a87e-d1ee7a77d221" (UID: "2e5e3189-3355-4f5e-a87e-d1ee7a77d221"). InnerVolumeSpecName "kube-api-access-96clr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.817207 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2e5e3189-3355-4f5e-a87e-d1ee7a77d221" (UID: "2e5e3189-3355-4f5e-a87e-d1ee7a77d221"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.822881 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2e5e3189-3355-4f5e-a87e-d1ee7a77d221" (UID: "2e5e3189-3355-4f5e-a87e-d1ee7a77d221"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.829311 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config" (OuterVolumeSpecName: "config") pod "2e5e3189-3355-4f5e-a87e-d1ee7a77d221" (UID: "2e5e3189-3355-4f5e-a87e-d1ee7a77d221"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.879318 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.879351 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96clr\" (UniqueName: \"kubernetes.io/projected/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-kube-api-access-96clr\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.879361 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:20 crc kubenswrapper[4664]: I1013 07:02:20.879371 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5e3189-3355-4f5e-a87e-d1ee7a77d221-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:21 crc kubenswrapper[4664]: I1013 07:02:21.598324 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7494c5547c-rgdd7" Oct 13 07:02:21 crc kubenswrapper[4664]: I1013 07:02:21.618106 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:02:21 crc kubenswrapper[4664]: I1013 07:02:21.626237 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7494c5547c-rgdd7"] Oct 13 07:02:23 crc kubenswrapper[4664]: I1013 07:02:23.062891 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" path="/var/lib/kubelet/pods/2e5e3189-3355-4f5e-a87e-d1ee7a77d221/volumes" Oct 13 07:02:24 crc kubenswrapper[4664]: I1013 07:02:24.629480 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerStarted","Data":"f54d06f8d67a53905318d662e4d245ae3ea13637aaf7202131e97b8219426bc8"} Oct 13 07:02:25 crc kubenswrapper[4664]: I1013 07:02:25.639221 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerStarted","Data":"df3280743399a72d09ae55c954199ce7c25f76d4049490719d23207c69b50fe9"} Oct 13 07:02:27 crc kubenswrapper[4664]: I1013 07:02:27.655064 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"5f87e03e-f1be-4f12-a267-393fcde6e51e","Type":"ContainerStarted","Data":"fbb75ae684d3cc60936e399535a144cec66ca7a8dfb27ef89dc25203f39b266d"} Oct 13 07:02:28 crc kubenswrapper[4664]: I1013 07:02:28.663028 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Oct 13 07:02:28 crc kubenswrapper[4664]: I1013 07:02:28.685082 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/memcached-0" podStartSLOduration=3.464088798 podStartE2EDuration="59.685061567s" podCreationTimestamp="2025-10-13 07:01:29 +0000 UTC" firstStartedPulling="2025-10-13 07:01:30.788707536 +0000 UTC m=+898.476152718" lastFinishedPulling="2025-10-13 07:02:27.009680295 +0000 UTC m=+954.697125487" observedRunningTime="2025-10-13 07:02:28.67832247 +0000 UTC m=+956.365767682" watchObservedRunningTime="2025-10-13 07:02:28.685061567 +0000 UTC m=+956.372506769" Oct 13 07:02:33 crc kubenswrapper[4664]: I1013 07:02:33.704107 4664 generic.go:334] "Generic (PLEG): container finished" podID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerID="f54d06f8d67a53905318d662e4d245ae3ea13637aaf7202131e97b8219426bc8" exitCode=0 Oct 13 07:02:33 crc kubenswrapper[4664]: I1013 07:02:33.704148 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerDied","Data":"f54d06f8d67a53905318d662e4d245ae3ea13637aaf7202131e97b8219426bc8"} Oct 13 07:02:35 crc kubenswrapper[4664]: I1013 07:02:35.140907 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Oct 13 07:02:35 crc kubenswrapper[4664]: I1013 07:02:35.724272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerStarted","Data":"507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42"} Oct 13 07:02:35 crc kubenswrapper[4664]: I1013 07:02:35.726661 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4431fc75-3beb-408f-8981-ef409291bd2d","Type":"ContainerStarted","Data":"5a93a5b6be278a92982f5beeb82a962d3ac54aed4ba7360830f095973bbd546b"} Oct 13 07:02:35 crc kubenswrapper[4664]: I1013 07:02:35.743949 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=14.026166995 podStartE2EDuration="1m8.74392332s" podCreationTimestamp="2025-10-13 07:01:27 +0000 UTC" firstStartedPulling="2025-10-13 07:01:29.037212678 +0000 UTC m=+896.724657860" lastFinishedPulling="2025-10-13 07:02:23.754968973 +0000 UTC m=+951.442414185" observedRunningTime="2025-10-13 07:02:35.741986928 +0000 UTC m=+963.429432120" watchObservedRunningTime="2025-10-13 07:02:35.74392332 +0000 UTC m=+963.431368522" Oct 13 07:02:35 crc kubenswrapper[4664]: I1013 07:02:35.765672 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.527461078 podStartE2EDuration="57.765648275s" podCreationTimestamp="2025-10-13 07:01:38 +0000 UTC" firstStartedPulling="2025-10-13 07:01:40.363290652 +0000 UTC m=+908.050735834" lastFinishedPulling="2025-10-13 07:02:34.601477829 +0000 UTC m=+962.288923031" observedRunningTime="2025-10-13 07:02:35.758169467 +0000 UTC m=+963.445614649" watchObservedRunningTime="2025-10-13 07:02:35.765648275 +0000 UTC m=+963.453093457" Oct 13 07:02:36 crc kubenswrapper[4664]: I1013 07:02:36.563900 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Oct 13 07:02:37 crc kubenswrapper[4664]: I1013 07:02:37.764042 4664 generic.go:334] "Generic (PLEG): container finished" podID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerID="df3280743399a72d09ae55c954199ce7c25f76d4049490719d23207c69b50fe9" exitCode=0 Oct 13 07:02:37 crc kubenswrapper[4664]: I1013 07:02:37.764267 4664 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerDied","Data":"df3280743399a72d09ae55c954199ce7c25f76d4049490719d23207c69b50fe9"} Oct 13 07:02:38 crc kubenswrapper[4664]: E1013 07:02:38.288329 4664 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:54822->38.102.83.223:37357: write tcp 38.102.83.223:54822->38.102.83.223:37357: write: broken pipe Oct 13 07:02:38 crc kubenswrapper[4664]: I1013 07:02:38.428222 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 13 07:02:38 crc kubenswrapper[4664]: I1013 07:02:38.428570 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 13 07:02:38 crc kubenswrapper[4664]: I1013 07:02:38.774464 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerStarted","Data":"19489a9e2a956a6eac4a3a695039618d5f0e1b7dc771e7427d7391a00e2a6983"} Oct 13 07:02:39 crc kubenswrapper[4664]: I1013 07:02:39.563677 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Oct 13 07:02:39 crc kubenswrapper[4664]: I1013 07:02:39.610565 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Oct 13 07:02:39 crc kubenswrapper[4664]: I1013 07:02:39.639331 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371965.215464 podStartE2EDuration="1m11.639312996s" podCreationTimestamp="2025-10-13 07:01:28 +0000 UTC" firstStartedPulling="2025-10-13 07:01:30.570168229 +0000 UTC m=+898.257613421" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:02:38.805085906 +0000 UTC m=+966.492531108" watchObservedRunningTime="2025-10-13 07:02:39.639312996 +0000 UTC m=+967.326758188" Oct 13 07:02:39 crc kubenswrapper[4664]: I1013 07:02:39.843595 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.031857 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.031916 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122503 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122807 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerName="init" Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122819 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerName="init" Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122832 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns" Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122838 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns" Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122854 4664 
Oct 13 07:02:39 crc kubenswrapper[4664]: I1013 07:02:39.843595 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.031857 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.031916 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122503 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122807 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122819 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122832 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122838 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns"
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122854 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64156830-a987-4481-a1e4-8d6fbe9fc22b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122860 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="64156830-a987-4481-a1e4-8d6fbe9fc22b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122871 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122877 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122885 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64150d45-539f-46e7-82ca-aa2e9eec369b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122892 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="64150d45-539f-46e7-82ca-aa2e9eec369b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: E1013 07:02:40.122905 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b530111-87c3-4bf7-9e59-5a6faffa86c9" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.122910 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b530111-87c3-4bf7-9e59-5a6faffa86c9" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123067 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="64150d45-539f-46e7-82ca-aa2e9eec369b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123073 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e5e3189-3355-4f5e-a87e-d1ee7a77d221" containerName="dnsmasq-dns"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123085 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b530111-87c3-4bf7-9e59-5a6faffa86c9" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123095 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="64156830-a987-4481-a1e4-8d6fbe9fc22b" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123103 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff1e67ca-3da1-48cf-b5f4-4bd7f693e78c" containerName="init"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.123885 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.128051 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-9vd96"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.128118 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.128962 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.148452 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.155093 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234452 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234518 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-config\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234556 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234595 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-scripts\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234787 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.234970 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtjt4\" (UniqueName: \"kubernetes.io/projected/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-kube-api-access-vtjt4\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336132 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336174 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-config\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336196 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336218 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-scripts\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336255 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336273 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336301 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtjt4\" (UniqueName: \"kubernetes.io/projected/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-kube-api-access-vtjt4\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.336929 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.337253 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-config\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.337273 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-scripts\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.346681 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.348697 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.348742 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.355055 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtjt4\" (UniqueName: \"kubernetes.io/projected/1cfedf4f-ec80-462c-a8a3-cda5afa7e451-kube-api-access-vtjt4\") pod \"ovn-northd-0\" (UID: \"1cfedf4f-ec80-462c-a8a3-cda5afa7e451\") " pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.457839 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.539420 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.650183 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Oct 13 07:02:40 crc kubenswrapper[4664]: I1013 07:02:40.972417 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Oct 13 07:02:41 crc kubenswrapper[4664]: I1013 07:02:41.795466 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1cfedf4f-ec80-462c-a8a3-cda5afa7e451","Type":"ContainerStarted","Data":"3c72becf641c3644367874554edc7662d9e6ed2061bcce1110b71574afe6b2e3"}
Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.288022 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"]
Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.289556 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk"
Need to start a new one" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.323746 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"] Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.373592 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.373651 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.373679 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.373699 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.373845 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcxv2\" (UniqueName: \"kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.475359 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.475974 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.476081 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxv2\" (UniqueName: \"kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.476451 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.476505 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.476820 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.476828 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.477290 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.477889 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.496909 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxv2\" (UniqueName: \"kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2\") pod \"dnsmasq-dns-d5c4f869-tbdgk\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.635850 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.809377 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1cfedf4f-ec80-462c-a8a3-cda5afa7e451","Type":"ContainerStarted","Data":"b3b3601c9b4ba16266d061ec6a19bf9156993f5c70deeb393b01749402ae812e"} Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.809647 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1cfedf4f-ec80-462c-a8a3-cda5afa7e451","Type":"ContainerStarted","Data":"b70f3d9e49de14cd861e626775b90ce91f1689d06ba05575b4c13d06f45a2e95"} Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.810057 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Oct 13 07:02:42 crc kubenswrapper[4664]: I1013 07:02:42.829285 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.153738577 podStartE2EDuration="2.829251323s" podCreationTimestamp="2025-10-13 07:02:40 +0000 UTC" firstStartedPulling="2025-10-13 07:02:40.981657681 +0000 UTC m=+968.669102883" lastFinishedPulling="2025-10-13 07:02:41.657170437 +0000 UTC m=+969.344615629" observedRunningTime="2025-10-13 07:02:42.828471073 +0000 UTC m=+970.515916275" watchObservedRunningTime="2025-10-13 07:02:42.829251323 +0000 UTC m=+970.516696515" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.077567 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"] Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.401302 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.407751 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.412649 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.412681 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.412693 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.413003 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-26fkh" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.424700 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.490903 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.490979 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrxd2\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-kube-api-access-jrxd2\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.491013 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-cache\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.491056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.491138 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-lock\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.592304 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-lock\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.592680 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.592884 4664 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-jrxd2\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-kube-api-access-jrxd2\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.593059 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-cache\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.593289 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.593851 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: E1013 07:02:43.594698 4664 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 13 07:02:43 crc kubenswrapper[4664]: E1013 07:02:43.594725 4664 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.594722 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-lock\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:43 crc kubenswrapper[4664]: E1013 07:02:43.594840 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift podName:9953b8b0-42bc-4192-8791-d0564fa27f10 nodeName:}" failed. No retries permitted until 2025-10-13 07:02:44.094824895 +0000 UTC m=+971.782270087 (durationBeforeRetry 500ms). 
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.594958 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/9953b8b0-42bc-4192-8791-d0564fa27f10-cache\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.626334 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrxd2\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-kube-api-access-jrxd2\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.636380 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.653765 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-c9fnl"]
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.654871 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.658249 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.658684 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.658831 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.663353 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-c9fnl"]
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796593 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796656 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796698 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796733 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxnz5\" (UniqueName: \"kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796836 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.796919 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.819895 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" event={"ID":"42f42e84-f556-4409-8f8f-3f7f529681e8","Type":"ContainerDied","Data":"c3119ae1ffa43e0e2da6b7d90790aafc396973ce93245b2639c114067a4939a1"}
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.819903 4664 generic.go:334] "Generic (PLEG): container finished" podID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerID="c3119ae1ffa43e0e2da6b7d90790aafc396973ce93245b2639c114067a4939a1" exitCode=0
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.820121 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" event={"ID":"42f42e84-f556-4409-8f8f-3f7f529681e8","Type":"ContainerStarted","Data":"82066fe6775790d745ac629e99589478cac13110aa8dec143ad9b5f1a8b5811f"}
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.898956 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899016 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899055 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899088 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxnz5\" (UniqueName: \"kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899133 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899177 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.899245 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.900640 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.902785 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.904461 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.905162 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.906565 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl"
Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.909589 4664 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl" Oct 13 07:02:43 crc kubenswrapper[4664]: I1013 07:02:43.923915 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxnz5\" (UniqueName: \"kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5\") pod \"swift-ring-rebalance-c9fnl\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " pod="openstack/swift-ring-rebalance-c9fnl" Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.013899 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-c9fnl" Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.102665 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:44 crc kubenswrapper[4664]: E1013 07:02:44.102997 4664 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 13 07:02:44 crc kubenswrapper[4664]: E1013 07:02:44.103024 4664 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 13 07:02:44 crc kubenswrapper[4664]: E1013 07:02:44.103083 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift podName:9953b8b0-42bc-4192-8791-d0564fa27f10 nodeName:}" failed. No retries permitted until 2025-10-13 07:02:45.103065522 +0000 UTC m=+972.790510714 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift") pod "swift-storage-0" (UID: "9953b8b0-42bc-4192-8791-d0564fa27f10") : configmap "swift-ring-files" not found Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.459096 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-c9fnl"] Oct 13 07:02:44 crc kubenswrapper[4664]: W1013 07:02:44.467777 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1cb7b09_b7ec_4d03_b998_5442ec0ba9de.slice/crio-123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937 WatchSource:0}: Error finding container 123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937: Status 404 returned error can't find the container with id 123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937 Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.760651 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.825338 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.832091 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c9fnl" event={"ID":"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de","Type":"ContainerStarted","Data":"123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937"} Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.834455 4664 generic.go:334] "Generic (PLEG): container finished" podID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerID="fa4dc12ff6be47dd21c58800f264181d51d1d13bcb68c135a914dab962750129" exitCode=0 Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.834524 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerDied","Data":"fa4dc12ff6be47dd21c58800f264181d51d1d13bcb68c135a914dab962750129"} Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.839743 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" event={"ID":"42f42e84-f556-4409-8f8f-3f7f529681e8","Type":"ContainerStarted","Data":"4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305"} Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.839853 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.842778 4664 generic.go:334] "Generic (PLEG): container finished" podID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerID="02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07" exitCode=0 Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.842918 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerDied","Data":"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07"} Oct 13 07:02:44 crc kubenswrapper[4664]: I1013 07:02:44.895128 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podStartSLOduration=2.895110075 podStartE2EDuration="2.895110075s" podCreationTimestamp="2025-10-13 07:02:42 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:02:44.879242854 +0000 UTC m=+972.566688056" watchObservedRunningTime="2025-10-13 07:02:44.895110075 +0000 UTC m=+972.582555267" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.118678 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:45 crc kubenswrapper[4664]: E1013 07:02:45.118861 4664 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 13 07:02:45 crc kubenswrapper[4664]: E1013 07:02:45.118875 4664 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 13 07:02:45 crc kubenswrapper[4664]: E1013 07:02:45.118910 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift podName:9953b8b0-42bc-4192-8791-d0564fa27f10 nodeName:}" failed. No retries permitted until 2025-10-13 07:02:47.118897071 +0000 UTC m=+974.806342263 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift") pod "swift-storage-0" (UID: "9953b8b0-42bc-4192-8791-d0564fa27f10") : configmap "swift-ring-files" not found Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.210337 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-47b95" podUID="7c8ef9b2-22fd-4d00-b710-8e22b4fefecf" containerName="ovn-controller" probeResult="failure" output=< Oct 13 07:02:45 crc kubenswrapper[4664]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Oct 13 07:02:45 crc kubenswrapper[4664]: > Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.242400 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.267728 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-frzn7" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.420147 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-472ms"] Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.423112 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-472ms" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.455819 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-472ms"] Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.534477 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-47b95-config-x8kk4"] Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.535781 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.538837 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.540186 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-x8kk4"] Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.625655 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzdzx\" (UniqueName: \"kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx\") pod \"glance-db-create-472ms\" (UID: \"e8864712-58ae-4769-a998-f78da5eaf5ab\") " pod="openstack/glance-db-create-472ms" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732055 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732177 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzdzx\" (UniqueName: \"kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx\") pod \"glance-db-create-472ms\" (UID: \"e8864712-58ae-4769-a998-f78da5eaf5ab\") " pod="openstack/glance-db-create-472ms" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732222 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vb4ph\" (UniqueName: \"kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732290 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732318 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732340 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.732378 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.777547 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzdzx\" (UniqueName: \"kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx\") pod \"glance-db-create-472ms\" (UID: \"e8864712-58ae-4769-a998-f78da5eaf5ab\") " pod="openstack/glance-db-create-472ms" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.834743 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.834867 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.834947 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vb4ph\" (UniqueName: \"kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.834982 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.835001 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.835047 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.835507 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.835585 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.835830 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.836304 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.836901 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.858453 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerStarted","Data":"7de8814af1773315d641d3144577e78145e2036b7f885755ec4daf8d88b0c09d"} Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.859732 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.864138 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerStarted","Data":"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41"} Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.866147 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vb4ph\" (UniqueName: \"kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph\") pod \"ovn-controller-47b95-config-x8kk4\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.885144 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.776225317 podStartE2EDuration="1m20.88512301s" podCreationTimestamp="2025-10-13 07:01:25 +0000 UTC" firstStartedPulling="2025-10-13 07:01:26.943047746 +0000 UTC m=+894.630492938" lastFinishedPulling="2025-10-13 07:02:11.051945439 +0000 UTC m=+938.739390631" observedRunningTime="2025-10-13 07:02:45.880929789 +0000 UTC m=+973.568374991" watchObservedRunningTime="2025-10-13 07:02:45.88512301 +0000 UTC m=+973.572568202" Oct 13 07:02:45 crc kubenswrapper[4664]: I1013 07:02:45.923341 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.423782621 podStartE2EDuration="1m20.923228256s" podCreationTimestamp="2025-10-13 07:01:25 +0000 UTC" firstStartedPulling="2025-10-13 07:01:27.552474114 +0000 UTC 
m=+895.239919306" lastFinishedPulling="2025-10-13 07:02:11.051919749 +0000 UTC m=+938.739364941" observedRunningTime="2025-10-13 07:02:45.907407963 +0000 UTC m=+973.594853165" watchObservedRunningTime="2025-10-13 07:02:45.923228256 +0000 UTC m=+973.610673448" Oct 13 07:02:46 crc kubenswrapper[4664]: I1013 07:02:46.041147 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-472ms" Oct 13 07:02:46 crc kubenswrapper[4664]: I1013 07:02:46.155427 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:46 crc kubenswrapper[4664]: I1013 07:02:46.570012 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-472ms"] Oct 13 07:02:46 crc kubenswrapper[4664]: I1013 07:02:46.784967 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-x8kk4"] Oct 13 07:02:47 crc kubenswrapper[4664]: I1013 07:02:47.005733 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:02:47 crc kubenswrapper[4664]: I1013 07:02:47.160299 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:47 crc kubenswrapper[4664]: E1013 07:02:47.161908 4664 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 13 07:02:47 crc kubenswrapper[4664]: E1013 07:02:47.161930 4664 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 13 07:02:47 crc kubenswrapper[4664]: E1013 07:02:47.161987 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift podName:9953b8b0-42bc-4192-8791-d0564fa27f10 nodeName:}" failed. No retries permitted until 2025-10-13 07:02:51.161968287 +0000 UTC m=+978.849413479 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift") pod "swift-storage-0" (UID: "9953b8b0-42bc-4192-8791-d0564fa27f10") : configmap "swift-ring-files" not found Oct 13 07:02:48 crc kubenswrapper[4664]: W1013 07:02:48.737836 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8864712_58ae_4769_a998_f78da5eaf5ab.slice/crio-f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68 WatchSource:0}: Error finding container f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68: Status 404 returned error can't find the container with id f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68 Oct 13 07:02:48 crc kubenswrapper[4664]: W1013 07:02:48.742978 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc02214e8_743a_4565_86d5_1304ce189bba.slice/crio-5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01 WatchSource:0}: Error finding container 5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01: Status 404 returned error can't find the container with id 5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01 Oct 13 07:02:48 crc kubenswrapper[4664]: I1013 07:02:48.897198 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-x8kk4" event={"ID":"c02214e8-743a-4565-86d5-1304ce189bba","Type":"ContainerStarted","Data":"5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01"} Oct 13 07:02:48 crc kubenswrapper[4664]: I1013 07:02:48.899335 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-472ms" event={"ID":"e8864712-58ae-4769-a998-f78da5eaf5ab","Type":"ContainerStarted","Data":"f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68"} Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.638642 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-4cxhf"] Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.640249 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.646853 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-4cxhf"] Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.812867 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m76xb\" (UniqueName: \"kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb\") pod \"keystone-db-create-4cxhf\" (UID: \"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598\") " pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.907021 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c9fnl" event={"ID":"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de","Type":"ContainerStarted","Data":"be1f1dd06ed66a23cf3fc86582c6e2c4f5451b668b8f5da07c1dc4c12c79fc87"} Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.908490 4664 generic.go:334] "Generic (PLEG): container finished" podID="e8864712-58ae-4769-a998-f78da5eaf5ab" containerID="10ba63a4f580a93d2b34b9b5f4af63b5694b661dc66be9637d8e2e7317b5d7d2" exitCode=0 Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.908551 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-472ms" event={"ID":"e8864712-58ae-4769-a998-f78da5eaf5ab","Type":"ContainerDied","Data":"10ba63a4f580a93d2b34b9b5f4af63b5694b661dc66be9637d8e2e7317b5d7d2"} Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.910752 4664 generic.go:334] "Generic (PLEG): container finished" podID="c02214e8-743a-4565-86d5-1304ce189bba" containerID="65fc8909ea63a857d9cb64e8069c756e8fd98ffc81aee4dca7c70734a83161d4" exitCode=0 Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.910859 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-x8kk4" event={"ID":"c02214e8-743a-4565-86d5-1304ce189bba","Type":"ContainerDied","Data":"65fc8909ea63a857d9cb64e8069c756e8fd98ffc81aee4dca7c70734a83161d4"} Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.914522 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m76xb\" (UniqueName: \"kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb\") pod \"keystone-db-create-4cxhf\" (UID: \"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598\") " pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.935947 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-c9fnl" podStartSLOduration=2.598501112 podStartE2EDuration="6.935930152s" podCreationTimestamp="2025-10-13 07:02:43 +0000 UTC" firstStartedPulling="2025-10-13 07:02:44.470660336 +0000 UTC m=+972.158105528" lastFinishedPulling="2025-10-13 07:02:48.808089376 +0000 UTC m=+976.495534568" observedRunningTime="2025-10-13 07:02:49.931656388 +0000 UTC m=+977.619101580" watchObservedRunningTime="2025-10-13 07:02:49.935930152 +0000 UTC m=+977.623375344" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.947267 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-zgqsr"] Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.948234 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.955299 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m76xb\" (UniqueName: \"kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb\") pod \"keystone-db-create-4cxhf\" (UID: \"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598\") " pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.967015 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zgqsr"] Oct 13 07:02:49 crc kubenswrapper[4664]: I1013 07:02:49.993173 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.116946 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls8j5\" (UniqueName: \"kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5\") pod \"placement-db-create-zgqsr\" (UID: \"6dbf0b57-1653-44ef-a493-0d5aebc30318\") " pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.210998 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-47b95" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.218260 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls8j5\" (UniqueName: \"kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5\") pod \"placement-db-create-zgqsr\" (UID: \"6dbf0b57-1653-44ef-a493-0d5aebc30318\") " pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.241721 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls8j5\" (UniqueName: \"kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5\") pod \"placement-db-create-zgqsr\" (UID: \"6dbf0b57-1653-44ef-a493-0d5aebc30318\") " pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.289884 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-4cxhf"] Oct 13 07:02:50 crc kubenswrapper[4664]: W1013 07:02:50.298926 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0eb1ddf_3b6c_4615_b8f8_ef3134b6d598.slice/crio-395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a WatchSource:0}: Error finding container 395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a: Status 404 returned error can't find the container with id 395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.377349 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.853692 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zgqsr"] Oct 13 07:02:50 crc kubenswrapper[4664]: W1013 07:02:50.856722 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6dbf0b57_1653_44ef_a493_0d5aebc30318.slice/crio-fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1 WatchSource:0}: Error finding container fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1: Status 404 returned error can't find the container with id fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1 Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.927170 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zgqsr" event={"ID":"6dbf0b57-1653-44ef-a493-0d5aebc30318","Type":"ContainerStarted","Data":"fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1"} Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.932501 4664 generic.go:334] "Generic (PLEG): container finished" podID="b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" containerID="26160e0d45a37ab6eec436ccdf57a11521fbbce6adc6ef2da3a81ab61239ae53" exitCode=0 Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.932600 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-4cxhf" event={"ID":"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598","Type":"ContainerDied","Data":"26160e0d45a37ab6eec436ccdf57a11521fbbce6adc6ef2da3a81ab61239ae53"} Oct 13 07:02:50 crc kubenswrapper[4664]: I1013 07:02:50.932678 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-4cxhf" event={"ID":"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598","Type":"ContainerStarted","Data":"395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a"} Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.233220 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:51 crc kubenswrapper[4664]: E1013 07:02:51.234007 4664 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 13 07:02:51 crc kubenswrapper[4664]: E1013 07:02:51.234038 4664 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 13 07:02:51 crc kubenswrapper[4664]: E1013 07:02:51.234093 4664 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift podName:9953b8b0-42bc-4192-8791-d0564fa27f10 nodeName:}" failed. No retries permitted until 2025-10-13 07:02:59.234075281 +0000 UTC m=+986.921520473 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift") pod "swift-storage-0" (UID: "9953b8b0-42bc-4192-8791-d0564fa27f10") : configmap "swift-ring-files" not found Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.336527 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.343508 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-472ms" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435780 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435849 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435891 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435921 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435932 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run" (OuterVolumeSpecName: "var-run") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435939 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzdzx\" (UniqueName: \"kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx\") pod \"e8864712-58ae-4769-a998-f78da5eaf5ab\" (UID: \"e8864712-58ae-4769-a998-f78da5eaf5ab\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.435976 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.436046 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.436602 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.436631 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.436078 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vb4ph\" (UniqueName: \"kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph\") pod \"c02214e8-743a-4565-86d5-1304ce189bba\" (UID: \"c02214e8-743a-4565-86d5-1304ce189bba\") " Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.436924 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts" (OuterVolumeSpecName: "scripts") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.437399 4664 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.437419 4664 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.437427 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.437435 4664 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c02214e8-743a-4565-86d5-1304ce189bba-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.437455 4664 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c02214e8-743a-4565-86d5-1304ce189bba-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.441670 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx" (OuterVolumeSpecName: "kube-api-access-bzdzx") pod "e8864712-58ae-4769-a998-f78da5eaf5ab" (UID: "e8864712-58ae-4769-a998-f78da5eaf5ab"). InnerVolumeSpecName "kube-api-access-bzdzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.442497 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph" (OuterVolumeSpecName: "kube-api-access-vb4ph") pod "c02214e8-743a-4565-86d5-1304ce189bba" (UID: "c02214e8-743a-4565-86d5-1304ce189bba"). InnerVolumeSpecName "kube-api-access-vb4ph". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.538608 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzdzx\" (UniqueName: \"kubernetes.io/projected/e8864712-58ae-4769-a998-f78da5eaf5ab-kube-api-access-bzdzx\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.538661 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vb4ph\" (UniqueName: \"kubernetes.io/projected/c02214e8-743a-4565-86d5-1304ce189bba-kube-api-access-vb4ph\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.948523 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-x8kk4" event={"ID":"c02214e8-743a-4565-86d5-1304ce189bba","Type":"ContainerDied","Data":"5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01"} Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.948624 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c57841f3e4d60eb103ecf43240182dfdda2d77a6b96444d7dfbbdefc141ea01" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.948743 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-x8kk4" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.954617 4664 generic.go:334] "Generic (PLEG): container finished" podID="6dbf0b57-1653-44ef-a493-0d5aebc30318" containerID="ee65af1affc1e0560d8e0cd747ad7cc83ecc46f1ab35e839d9ace78c72499225" exitCode=0 Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.954726 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zgqsr" event={"ID":"6dbf0b57-1653-44ef-a493-0d5aebc30318","Type":"ContainerDied","Data":"ee65af1affc1e0560d8e0cd747ad7cc83ecc46f1ab35e839d9ace78c72499225"} Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.959866 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-472ms" Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.962773 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-472ms" event={"ID":"e8864712-58ae-4769-a998-f78da5eaf5ab","Type":"ContainerDied","Data":"f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68"} Oct 13 07:02:51 crc kubenswrapper[4664]: I1013 07:02:51.962912 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5370e3d6b084a5a7407cf33a38d5b69177da567175fae20de0868e6f26d5d68" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.322927 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.451835 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m76xb\" (UniqueName: \"kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb\") pod \"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598\" (UID: \"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598\") " Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.457648 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb" (OuterVolumeSpecName: "kube-api-access-m76xb") pod "b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" (UID: "b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598"). InnerVolumeSpecName "kube-api-access-m76xb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.519358 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-47b95-config-x8kk4"] Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.524202 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-47b95-config-x8kk4"] Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.554209 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m76xb\" (UniqueName: \"kubernetes.io/projected/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598-kube-api-access-m76xb\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.564045 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-47b95-config-skv8f"] Oct 13 07:02:52 crc kubenswrapper[4664]: E1013 07:02:52.564518 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.564611 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: E1013 07:02:52.564677 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8864712-58ae-4769-a998-f78da5eaf5ab" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.564731 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8864712-58ae-4769-a998-f78da5eaf5ab" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: E1013 07:02:52.565165 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c02214e8-743a-4565-86d5-1304ce189bba" containerName="ovn-config" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.565228 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c02214e8-743a-4565-86d5-1304ce189bba" containerName="ovn-config" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.565439 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="c02214e8-743a-4565-86d5-1304ce189bba" containerName="ovn-config" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.565499 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.565572 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8864712-58ae-4769-a998-f78da5eaf5ab" containerName="mariadb-database-create" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.566276 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.569761 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.588247 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-skv8f"] Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.637064 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.707375 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.707578 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="dnsmasq-dns" containerID="cri-o://41caa6e75524cbc0905f5b391d024e9defff1bb21963f4d9ecd98cd0f0df1809" gracePeriod=10 Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757662 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757746 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757791 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757828 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5nl6\" (UniqueName: \"kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757849 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.757899 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: 
\"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.859138 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.860995 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861124 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861151 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5nl6\" (UniqueName: \"kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861359 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861391 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861434 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861468 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861716 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: 
\"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.861885 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.862165 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.885526 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5nl6\" (UniqueName: \"kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6\") pod \"ovn-controller-47b95-config-skv8f\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.987734 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-4cxhf" event={"ID":"b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598","Type":"ContainerDied","Data":"395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a"} Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.988039 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="395b6299cac37a185e59d0c978cee4c84f0c49c9ac3cd4f8fba6782499c8406a" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.988093 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-4cxhf" Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.997129 4664 generic.go:334] "Generic (PLEG): container finished" podID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerID="41caa6e75524cbc0905f5b391d024e9defff1bb21963f4d9ecd98cd0f0df1809" exitCode=0 Oct 13 07:02:52 crc kubenswrapper[4664]: I1013 07:02:52.997314 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerDied","Data":"41caa6e75524cbc0905f5b391d024e9defff1bb21963f4d9ecd98cd0f0df1809"} Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.075480 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c02214e8-743a-4565-86d5-1304ce189bba" path="/var/lib/kubelet/pods/c02214e8-743a-4565-86d5-1304ce189bba/volumes" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.179962 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.258435 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.366349 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.377750 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc\") pod \"f1c77c7b-1798-49db-b7f4-8addf6719a12\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.377825 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config\") pod \"f1c77c7b-1798-49db-b7f4-8addf6719a12\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.377869 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb\") pod \"f1c77c7b-1798-49db-b7f4-8addf6719a12\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.377930 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb\") pod \"f1c77c7b-1798-49db-b7f4-8addf6719a12\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.377969 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vr87\" (UniqueName: \"kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87\") pod \"f1c77c7b-1798-49db-b7f4-8addf6719a12\" (UID: \"f1c77c7b-1798-49db-b7f4-8addf6719a12\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.398053 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87" (OuterVolumeSpecName: "kube-api-access-8vr87") pod "f1c77c7b-1798-49db-b7f4-8addf6719a12" (UID: "f1c77c7b-1798-49db-b7f4-8addf6719a12"). InnerVolumeSpecName "kube-api-access-8vr87". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.441481 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config" (OuterVolumeSpecName: "config") pod "f1c77c7b-1798-49db-b7f4-8addf6719a12" (UID: "f1c77c7b-1798-49db-b7f4-8addf6719a12"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.458504 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f1c77c7b-1798-49db-b7f4-8addf6719a12" (UID: "f1c77c7b-1798-49db-b7f4-8addf6719a12"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.460693 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f1c77c7b-1798-49db-b7f4-8addf6719a12" (UID: "f1c77c7b-1798-49db-b7f4-8addf6719a12"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.462316 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f1c77c7b-1798-49db-b7f4-8addf6719a12" (UID: "f1c77c7b-1798-49db-b7f4-8addf6719a12"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.479876 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls8j5\" (UniqueName: \"kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5\") pod \"6dbf0b57-1653-44ef-a493-0d5aebc30318\" (UID: \"6dbf0b57-1653-44ef-a493-0d5aebc30318\") " Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.480251 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.480264 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.480272 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.480282 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1c77c7b-1798-49db-b7f4-8addf6719a12-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.480290 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vr87\" (UniqueName: \"kubernetes.io/projected/f1c77c7b-1798-49db-b7f4-8addf6719a12-kube-api-access-8vr87\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.482935 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5" (OuterVolumeSpecName: "kube-api-access-ls8j5") pod "6dbf0b57-1653-44ef-a493-0d5aebc30318" (UID: "6dbf0b57-1653-44ef-a493-0d5aebc30318"). InnerVolumeSpecName "kube-api-access-ls8j5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.582439 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls8j5\" (UniqueName: \"kubernetes.io/projected/6dbf0b57-1653-44ef-a493-0d5aebc30318-kube-api-access-ls8j5\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:53 crc kubenswrapper[4664]: I1013 07:02:53.779082 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-skv8f"] Oct 13 07:02:53 crc kubenswrapper[4664]: W1013 07:02:53.799754 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3004f225_7903_43b4_857d_67aa68745d31.slice/crio-4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc WatchSource:0}: Error finding container 4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc: Status 404 returned error can't find the container with id 4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.006384 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zgqsr" event={"ID":"6dbf0b57-1653-44ef-a493-0d5aebc30318","Type":"ContainerDied","Data":"fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1"} Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.006660 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fca621b8a21a1e34a3acc8c57e7dc9b270916a7db9fc3cba3518dbcdef6f53b1" Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.006710 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zgqsr" Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.016890 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-skv8f" event={"ID":"3004f225-7903-43b4-857d-67aa68745d31","Type":"ContainerStarted","Data":"4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc"} Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.019742 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" event={"ID":"f1c77c7b-1798-49db-b7f4-8addf6719a12","Type":"ContainerDied","Data":"fc243d40c4ac6c854f9563b6c2ba52a1f9fe904d347a9e30d0c1ed6a26eb552c"} Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.019788 4664 scope.go:117] "RemoveContainer" containerID="41caa6e75524cbc0905f5b391d024e9defff1bb21963f4d9ecd98cd0f0df1809" Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.019904 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-559448fdbc-fw7cx" Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.045512 4664 scope.go:117] "RemoveContainer" containerID="c3cb732b8da4a7b3ba19ab0789018601cc1a905a4875ff97ae09bd669f144151" Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.060515 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:02:54 crc kubenswrapper[4664]: I1013 07:02:54.075047 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-559448fdbc-fw7cx"] Oct 13 07:02:55 crc kubenswrapper[4664]: I1013 07:02:55.028300 4664 generic.go:334] "Generic (PLEG): container finished" podID="3004f225-7903-43b4-857d-67aa68745d31" containerID="d75eaeee258f07c74698fb46034db5e0bac8116ac1f3e4f3384ede45cb87b730" exitCode=0 Oct 13 07:02:55 crc kubenswrapper[4664]: I1013 07:02:55.028951 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-skv8f" event={"ID":"3004f225-7903-43b4-857d-67aa68745d31","Type":"ContainerDied","Data":"d75eaeee258f07c74698fb46034db5e0bac8116ac1f3e4f3384ede45cb87b730"} Oct 13 07:02:55 crc kubenswrapper[4664]: I1013 07:02:55.059577 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" path="/var/lib/kubelet/pods/f1c77c7b-1798-49db-b7f4-8addf6719a12/volumes" Oct 13 07:02:55 crc kubenswrapper[4664]: I1013 07:02:55.534729 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.377412 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.532939 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534011 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534057 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5nl6\" (UniqueName: \"kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534107 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534132 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc 
kubenswrapper[4664]: I1013 07:02:56.534165 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run\") pod \"3004f225-7903-43b4-857d-67aa68745d31\" (UID: \"3004f225-7903-43b4-857d-67aa68745d31\") " Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534603 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run" (OuterVolumeSpecName: "var-run") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.534645 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.535552 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.535594 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.535735 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts" (OuterVolumeSpecName: "scripts") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.549604 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6" (OuterVolumeSpecName: "kube-api-access-v5nl6") pod "3004f225-7903-43b4-857d-67aa68745d31" (UID: "3004f225-7903-43b4-857d-67aa68745d31"). InnerVolumeSpecName "kube-api-access-v5nl6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.599938 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636450 4664 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636489 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5nl6\" (UniqueName: \"kubernetes.io/projected/3004f225-7903-43b4-857d-67aa68745d31-kube-api-access-v5nl6\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636501 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3004f225-7903-43b4-857d-67aa68745d31-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636512 4664 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636522 4664 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:56 crc kubenswrapper[4664]: I1013 07:02:56.636535 4664 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3004f225-7903-43b4-857d-67aa68745d31-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006152 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-k459x"] Oct 13 07:02:57 crc kubenswrapper[4664]: E1013 07:02:57.006444 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="dnsmasq-dns" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006461 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="dnsmasq-dns" Oct 13 07:02:57 crc kubenswrapper[4664]: E1013 07:02:57.006474 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="init" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006480 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="init" Oct 13 07:02:57 crc kubenswrapper[4664]: E1013 07:02:57.006489 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3004f225-7903-43b4-857d-67aa68745d31" containerName="ovn-config" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006495 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3004f225-7903-43b4-857d-67aa68745d31" containerName="ovn-config" Oct 13 07:02:57 crc kubenswrapper[4664]: E1013 07:02:57.006519 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbf0b57-1653-44ef-a493-0d5aebc30318" containerName="mariadb-database-create" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006525 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbf0b57-1653-44ef-a493-0d5aebc30318" containerName="mariadb-database-create" Oct 13 07:02:57 
crc kubenswrapper[4664]: I1013 07:02:57.006661 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3004f225-7903-43b4-857d-67aa68745d31" containerName="ovn-config" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006678 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dbf0b57-1653-44ef-a493-0d5aebc30318" containerName="mariadb-database-create" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.006689 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1c77c7b-1798-49db-b7f4-8addf6719a12" containerName="dnsmasq-dns" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.007163 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k459x" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.009007 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.030028 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-k459x"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.051397 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-skv8f" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.058453 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-skv8f" event={"ID":"3004f225-7903-43b4-857d-67aa68745d31","Type":"ContainerDied","Data":"4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc"} Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.058503 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d58dd9855dd80432ad407206495b9ac2c84b427f2175e9490d7920e194f91cc" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.122259 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-h49b2"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.125706 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-h49b2" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.134768 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h49b2"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.144821 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngh9r\" (UniqueName: \"kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r\") pod \"cinder-db-create-k459x\" (UID: \"68d06993-8fe8-4f18-842b-a989f5f9c95c\") " pod="openstack/cinder-db-create-k459x" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.246911 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngh9r\" (UniqueName: \"kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r\") pod \"cinder-db-create-k459x\" (UID: \"68d06993-8fe8-4f18-842b-a989f5f9c95c\") " pod="openstack/cinder-db-create-k459x" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.247564 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbqp9\" (UniqueName: \"kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9\") pod \"barbican-db-create-h49b2\" (UID: \"fcbed96d-d57c-44ce-98ad-9a17f7579163\") " pod="openstack/barbican-db-create-h49b2" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.265405 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngh9r\" (UniqueName: \"kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r\") pod \"cinder-db-create-k459x\" (UID: \"68d06993-8fe8-4f18-842b-a989f5f9c95c\") " pod="openstack/cinder-db-create-k459x" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.323220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k459x" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.325206 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-g6pqn"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.353906 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-g6pqn"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.355430 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbqp9\" (UniqueName: \"kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9\") pod \"barbican-db-create-h49b2\" (UID: \"fcbed96d-d57c-44ce-98ad-9a17f7579163\") " pod="openstack/barbican-db-create-h49b2" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.357952 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-g6pqn" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.416761 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbqp9\" (UniqueName: \"kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9\") pod \"barbican-db-create-h49b2\" (UID: \"fcbed96d-d57c-44ce-98ad-9a17f7579163\") " pod="openstack/barbican-db-create-h49b2" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.441378 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-bdt9h"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.445138 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-bdt9h" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.445402 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h49b2" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.464698 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghcwv\" (UniqueName: \"kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv\") pod \"heat-db-create-g6pqn\" (UID: \"b9e45296-e54a-4563-9c44-6cdc4e2e5640\") " pod="openstack/heat-db-create-g6pqn" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.503159 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-bdt9h"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.521271 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-47b95-config-skv8f"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.529293 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-47b95-config-skv8f"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.566230 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88zzm\" (UniqueName: \"kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm\") pod \"neutron-db-create-bdt9h\" (UID: \"daa756df-de77-404f-b164-d668cd3a544b\") " pod="openstack/neutron-db-create-bdt9h" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.566514 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghcwv\" (UniqueName: \"kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv\") pod \"heat-db-create-g6pqn\" (UID: \"b9e45296-e54a-4563-9c44-6cdc4e2e5640\") " pod="openstack/heat-db-create-g6pqn" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.588704 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-47b95-config-jmsc7"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.590188 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.597181 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.598513 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-jmsc7"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.605415 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghcwv\" (UniqueName: \"kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv\") pod \"heat-db-create-g6pqn\" (UID: \"b9e45296-e54a-4563-9c44-6cdc4e2e5640\") " pod="openstack/heat-db-create-g6pqn" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.668488 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88zzm\" (UniqueName: \"kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm\") pod \"neutron-db-create-bdt9h\" (UID: \"daa756df-de77-404f-b164-d668cd3a544b\") " pod="openstack/neutron-db-create-bdt9h" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.703235 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88zzm\" (UniqueName: \"kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm\") pod \"neutron-db-create-bdt9h\" (UID: \"daa756df-de77-404f-b164-d668cd3a544b\") " pod="openstack/neutron-db-create-bdt9h" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.772494 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.772658 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.772702 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.772882 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.773023 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: 
\"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.773152 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm4dh\" (UniqueName: \"kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.802047 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-k459x"] Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.854826 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-g6pqn" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.863915 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-bdt9h" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875172 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875215 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875275 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875337 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875370 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm4dh\" (UniqueName: \"kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875433 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875743 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875798 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.875901 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.876538 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.879436 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.901425 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm4dh\" (UniqueName: \"kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh\") pod \"ovn-controller-47b95-config-jmsc7\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:57 crc kubenswrapper[4664]: I1013 07:02:57.912969 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.102089 4664 generic.go:334] "Generic (PLEG): container finished" podID="b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" containerID="be1f1dd06ed66a23cf3fc86582c6e2c4f5451b668b8f5da07c1dc4c12c79fc87" exitCode=0 Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.102310 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c9fnl" event={"ID":"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de","Type":"ContainerDied","Data":"be1f1dd06ed66a23cf3fc86582c6e2c4f5451b668b8f5da07c1dc4c12c79fc87"} Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.114449 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k459x" event={"ID":"68d06993-8fe8-4f18-842b-a989f5f9c95c","Type":"ContainerStarted","Data":"3a6befadbb9c15c919317d4556f5579c14730bcd1b1ec42238adac33865dc807"} Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.148970 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-k459x" podStartSLOduration=2.148950471 podStartE2EDuration="2.148950471s" podCreationTimestamp="2025-10-13 07:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:02:58.14706149 +0000 UTC m=+985.834506682" watchObservedRunningTime="2025-10-13 07:02:58.148950471 +0000 UTC m=+985.836395663" Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.198072 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h49b2"] Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.342222 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-bdt9h"] Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.469912 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-jmsc7"] Oct 13 07:02:58 crc kubenswrapper[4664]: W1013 07:02:58.494653 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5947d238_8eb8_407b_a497_407d7cca98a1.slice/crio-fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1 WatchSource:0}: Error finding container fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1: Status 404 returned error can't find the container with id fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1 Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.519467 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-g6pqn"] Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.811497 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:02:58 crc kubenswrapper[4664]: I1013 07:02:58.811821 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.058167 4664 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="3004f225-7903-43b4-857d-67aa68745d31" path="/var/lib/kubelet/pods/3004f225-7903-43b4-857d-67aa68745d31/volumes" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.123696 4664 generic.go:334] "Generic (PLEG): container finished" podID="fcbed96d-d57c-44ce-98ad-9a17f7579163" containerID="67ad3ce4c7289a28e31c87521bd6d392222dd212b8a74fc9decd15bebfd95537" exitCode=0 Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.123758 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h49b2" event={"ID":"fcbed96d-d57c-44ce-98ad-9a17f7579163","Type":"ContainerDied","Data":"67ad3ce4c7289a28e31c87521bd6d392222dd212b8a74fc9decd15bebfd95537"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.123845 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h49b2" event={"ID":"fcbed96d-d57c-44ce-98ad-9a17f7579163","Type":"ContainerStarted","Data":"f1ce0c1e7673b0b43d90f7c17c6f26c9ab47a76adac4ac9fa5892e8bbcd38cf6"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.125349 4664 generic.go:334] "Generic (PLEG): container finished" podID="b9e45296-e54a-4563-9c44-6cdc4e2e5640" containerID="a1fd8125e060c9248e63ddd73ffaf0bbd6e9a4e9a86d7c71cdf57d890a9face2" exitCode=0 Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.125411 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-g6pqn" event={"ID":"b9e45296-e54a-4563-9c44-6cdc4e2e5640","Type":"ContainerDied","Data":"a1fd8125e060c9248e63ddd73ffaf0bbd6e9a4e9a86d7c71cdf57d890a9face2"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.125437 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-g6pqn" event={"ID":"b9e45296-e54a-4563-9c44-6cdc4e2e5640","Type":"ContainerStarted","Data":"c134d3d5d856e6bae56d5728d58f493979c1a7940dfd7b68526ab59043156ba2"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.126833 4664 generic.go:334] "Generic (PLEG): container finished" podID="68d06993-8fe8-4f18-842b-a989f5f9c95c" containerID="894f366308b7a378f9a4bc5b9cbe6b14b8441c1f0bcaee557bc991889a35a746" exitCode=0 Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.126873 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k459x" event={"ID":"68d06993-8fe8-4f18-842b-a989f5f9c95c","Type":"ContainerDied","Data":"894f366308b7a378f9a4bc5b9cbe6b14b8441c1f0bcaee557bc991889a35a746"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.128184 4664 generic.go:334] "Generic (PLEG): container finished" podID="daa756df-de77-404f-b164-d668cd3a544b" containerID="5fb95c8cf34ea5ed92c038af39a74d876313962d6c8055def018aed4773fddf9" exitCode=0 Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.128222 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bdt9h" event={"ID":"daa756df-de77-404f-b164-d668cd3a544b","Type":"ContainerDied","Data":"5fb95c8cf34ea5ed92c038af39a74d876313962d6c8055def018aed4773fddf9"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.128237 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bdt9h" event={"ID":"daa756df-de77-404f-b164-d668cd3a544b","Type":"ContainerStarted","Data":"dabc0d6b426795465c2fb1cdf385903924e790f89bcc9b064bb5c1dac3fa4870"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.129613 4664 generic.go:334] "Generic (PLEG): container finished" podID="5947d238-8eb8-407b-a497-407d7cca98a1" 
containerID="3b29012776a6a1433dba5f301f050d73da8f81d2d8e6def071afdb7a0cbb3828" exitCode=0 Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.129789 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-jmsc7" event={"ID":"5947d238-8eb8-407b-a497-407d7cca98a1","Type":"ContainerDied","Data":"3b29012776a6a1433dba5f301f050d73da8f81d2d8e6def071afdb7a0cbb3828"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.129839 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-jmsc7" event={"ID":"5947d238-8eb8-407b-a497-407d7cca98a1","Type":"ContainerStarted","Data":"fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1"} Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.303901 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.311742 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9953b8b0-42bc-4192-8791-d0564fa27f10-etc-swift\") pod \"swift-storage-0\" (UID: \"9953b8b0-42bc-4192-8791-d0564fa27f10\") " pod="openstack/swift-storage-0" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.322441 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.547512 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-c9fnl" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.609617 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxnz5\" (UniqueName: \"kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.609773 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.610708 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611067 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611098 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611119 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611155 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611200 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle\") pod \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\" (UID: \"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de\") " Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.611518 4664 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-ring-data-devices\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.612976 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.633424 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5" (OuterVolumeSpecName: "kube-api-access-jxnz5") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "kube-api-access-jxnz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.648103 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.649985 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts" (OuterVolumeSpecName: "scripts") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.661659 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.672218 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" (UID: "b1cb7b09-b7ec-4d03-b998-5442ec0ba9de"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712853 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712887 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxnz5\" (UniqueName: \"kubernetes.io/projected/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-kube-api-access-jxnz5\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712899 4664 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712907 4664 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-dispersionconf\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712920 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.712929 4664 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1cb7b09-b7ec-4d03-b998-5442ec0ba9de-swiftconf\") on node \"crc\" DevicePath \"\"" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.783115 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-e6d7-account-create-bxc77"] Oct 13 07:02:59 crc kubenswrapper[4664]: E1013 07:02:59.783635 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" containerName="swift-ring-rebalance" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.783721 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" containerName="swift-ring-rebalance" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 
07:02:59.783974 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1cb7b09-b7ec-4d03-b998-5442ec0ba9de" containerName="swift-ring-rebalance" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.784561 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.787648 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.803280 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-e6d7-account-create-bxc77"] Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.915519 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wsrm\" (UniqueName: \"kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm\") pod \"keystone-e6d7-account-create-bxc77\" (UID: \"acd74efa-e542-450a-b7c9-91eee744d0e9\") " pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:02:59 crc kubenswrapper[4664]: I1013 07:02:59.989477 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 13 07:02:59 crc kubenswrapper[4664]: W1013 07:02:59.995296 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9953b8b0_42bc_4192_8791_d0564fa27f10.slice/crio-638bd39e39348e366965b58dfb0435ac2ff8f68b9e08ed92d9dfce1903ecb516 WatchSource:0}: Error finding container 638bd39e39348e366965b58dfb0435ac2ff8f68b9e08ed92d9dfce1903ecb516: Status 404 returned error can't find the container with id 638bd39e39348e366965b58dfb0435ac2ff8f68b9e08ed92d9dfce1903ecb516 Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.016886 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wsrm\" (UniqueName: \"kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm\") pod \"keystone-e6d7-account-create-bxc77\" (UID: \"acd74efa-e542-450a-b7c9-91eee744d0e9\") " pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.034953 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wsrm\" (UniqueName: \"kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm\") pod \"keystone-e6d7-account-create-bxc77\" (UID: \"acd74efa-e542-450a-b7c9-91eee744d0e9\") " pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.088128 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-0a22-account-create-59l9x"] Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.089713 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.095091 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.104128 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0a22-account-create-59l9x"] Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.104818 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.163974 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-c9fnl" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.164049 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c9fnl" event={"ID":"b1cb7b09-b7ec-4d03-b998-5442ec0ba9de","Type":"ContainerDied","Data":"123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937"} Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.164089 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="123f63663a1f3dc9f529f75d6a23f56f17943433ef79209f9f7b3dc9f1324937" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.167754 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"638bd39e39348e366965b58dfb0435ac2ff8f68b9e08ed92d9dfce1903ecb516"} Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.220119 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9hp4\" (UniqueName: \"kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4\") pod \"placement-0a22-account-create-59l9x\" (UID: \"34827b2f-cc99-4948-868a-be9095deadb1\") " pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.325350 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9hp4\" (UniqueName: \"kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4\") pod \"placement-0a22-account-create-59l9x\" (UID: \"34827b2f-cc99-4948-868a-be9095deadb1\") " pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.352923 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9hp4\" (UniqueName: \"kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4\") pod \"placement-0a22-account-create-59l9x\" (UID: \"34827b2f-cc99-4948-868a-be9095deadb1\") " pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.418120 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.689811 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h49b2" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.803635 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-g6pqn" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.833469 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbqp9\" (UniqueName: \"kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9\") pod \"fcbed96d-d57c-44ce-98ad-9a17f7579163\" (UID: \"fcbed96d-d57c-44ce-98ad-9a17f7579163\") " Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.842985 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9" (OuterVolumeSpecName: "kube-api-access-dbqp9") pod "fcbed96d-d57c-44ce-98ad-9a17f7579163" (UID: "fcbed96d-d57c-44ce-98ad-9a17f7579163"). InnerVolumeSpecName "kube-api-access-dbqp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.937771 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghcwv\" (UniqueName: \"kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv\") pod \"b9e45296-e54a-4563-9c44-6cdc4e2e5640\" (UID: \"b9e45296-e54a-4563-9c44-6cdc4e2e5640\") " Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.938161 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbqp9\" (UniqueName: \"kubernetes.io/projected/fcbed96d-d57c-44ce-98ad-9a17f7579163-kube-api-access-dbqp9\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:00 crc kubenswrapper[4664]: I1013 07:03:00.941255 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv" (OuterVolumeSpecName: "kube-api-access-ghcwv") pod "b9e45296-e54a-4563-9c44-6cdc4e2e5640" (UID: "b9e45296-e54a-4563-9c44-6cdc4e2e5640"). InnerVolumeSpecName "kube-api-access-ghcwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.040645 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghcwv\" (UniqueName: \"kubernetes.io/projected/b9e45296-e54a-4563-9c44-6cdc4e2e5640-kube-api-access-ghcwv\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.060164 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-e6d7-account-create-bxc77"] Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.100216 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k459x" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.109143 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.123357 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-bdt9h" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.187081 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-e6d7-account-create-bxc77" event={"ID":"acd74efa-e542-450a-b7c9-91eee744d0e9","Type":"ContainerStarted","Data":"78fdd346bab8a6ff20ef00d8fac20e29362cb430e4f2e2d5deb7207dd647f347"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.189503 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-g6pqn" event={"ID":"b9e45296-e54a-4563-9c44-6cdc4e2e5640","Type":"ContainerDied","Data":"c134d3d5d856e6bae56d5728d58f493979c1a7940dfd7b68526ab59043156ba2"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.189528 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c134d3d5d856e6bae56d5728d58f493979c1a7940dfd7b68526ab59043156ba2" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.189581 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-g6pqn" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.192948 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-k459x" event={"ID":"68d06993-8fe8-4f18-842b-a989f5f9c95c","Type":"ContainerDied","Data":"3a6befadbb9c15c919317d4556f5579c14730bcd1b1ec42238adac33865dc807"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.192972 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a6befadbb9c15c919317d4556f5579c14730bcd1b1ec42238adac33865dc807" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.193063 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-k459x" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.195741 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-bdt9h" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.195767 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-bdt9h" event={"ID":"daa756df-de77-404f-b164-d668cd3a544b","Type":"ContainerDied","Data":"dabc0d6b426795465c2fb1cdf385903924e790f89bcc9b064bb5c1dac3fa4870"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.195820 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dabc0d6b426795465c2fb1cdf385903924e790f89bcc9b064bb5c1dac3fa4870" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.200076 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-jmsc7" event={"ID":"5947d238-8eb8-407b-a497-407d7cca98a1","Type":"ContainerDied","Data":"fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.200106 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc98f164e539338e9e0899b7d39f8652b4c69995e41d4f16e6b721d3787ac7c1" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.200146 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-jmsc7" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.201337 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h49b2" event={"ID":"fcbed96d-d57c-44ce-98ad-9a17f7579163","Type":"ContainerDied","Data":"f1ce0c1e7673b0b43d90f7c17c6f26c9ab47a76adac4ac9fa5892e8bbcd38cf6"} Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.201366 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1ce0c1e7673b0b43d90f7c17c6f26c9ab47a76adac4ac9fa5892e8bbcd38cf6" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.201430 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h49b2" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243221 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88zzm\" (UniqueName: \"kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm\") pod \"daa756df-de77-404f-b164-d668cd3a544b\" (UID: \"daa756df-de77-404f-b164-d668cd3a544b\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243304 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn\") pod \"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243387 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run\") pod \"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243429 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngh9r\" (UniqueName: \"kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r\") pod \"68d06993-8fe8-4f18-842b-a989f5f9c95c\" (UID: \"68d06993-8fe8-4f18-842b-a989f5f9c95c\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243453 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts\") pod \"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243488 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts\") pod \"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243583 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn\") pod \"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.243641 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm4dh\" (UniqueName: \"kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh\") pod 
\"5947d238-8eb8-407b-a497-407d7cca98a1\" (UID: \"5947d238-8eb8-407b-a497-407d7cca98a1\") " Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.244026 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.244089 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run" (OuterVolumeSpecName: "var-run") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.244090 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.244840 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.245054 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts" (OuterVolumeSpecName: "scripts") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.246492 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm" (OuterVolumeSpecName: "kube-api-access-88zzm") pod "daa756df-de77-404f-b164-d668cd3a544b" (UID: "daa756df-de77-404f-b164-d668cd3a544b"). InnerVolumeSpecName "kube-api-access-88zzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.248450 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r" (OuterVolumeSpecName: "kube-api-access-ngh9r") pod "68d06993-8fe8-4f18-842b-a989f5f9c95c" (UID: "68d06993-8fe8-4f18-842b-a989f5f9c95c"). InnerVolumeSpecName "kube-api-access-ngh9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.248663 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh" (OuterVolumeSpecName: "kube-api-access-mm4dh") pod "5947d238-8eb8-407b-a497-407d7cca98a1" (UID: "5947d238-8eb8-407b-a497-407d7cca98a1"). 
InnerVolumeSpecName "kube-api-access-mm4dh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.345936 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mm4dh\" (UniqueName: \"kubernetes.io/projected/5947d238-8eb8-407b-a497-407d7cca98a1-kube-api-access-mm4dh\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.345976 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88zzm\" (UniqueName: \"kubernetes.io/projected/daa756df-de77-404f-b164-d668cd3a544b-kube-api-access-88zzm\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.345990 4664 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.346005 4664 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.346018 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngh9r\" (UniqueName: \"kubernetes.io/projected/68d06993-8fe8-4f18-842b-a989f5f9c95c-kube-api-access-ngh9r\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.346030 4664 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.346041 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5947d238-8eb8-407b-a497-407d7cca98a1-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.346051 4664 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5947d238-8eb8-407b-a497-407d7cca98a1-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:01 crc kubenswrapper[4664]: I1013 07:03:01.374838 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0a22-account-create-59l9x"] Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.201051 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-47b95-config-jmsc7"] Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.210191 4664 generic.go:334] "Generic (PLEG): container finished" podID="34827b2f-cc99-4948-868a-be9095deadb1" containerID="c726bd2426a25507bf2fd8c1a897af212fa2aca339c75eaff52015869ed1d0f5" exitCode=0 Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.210266 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0a22-account-create-59l9x" event={"ID":"34827b2f-cc99-4948-868a-be9095deadb1","Type":"ContainerDied","Data":"c726bd2426a25507bf2fd8c1a897af212fa2aca339c75eaff52015869ed1d0f5"} Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.210291 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0a22-account-create-59l9x" event={"ID":"34827b2f-cc99-4948-868a-be9095deadb1","Type":"ContainerStarted","Data":"e659f586cbf2daede9ae8b2205ed7f2d634acec87aec5ad32ae1c3ca616d1a66"} Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.211080 
4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-47b95-config-jmsc7"] Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.222165 4664 generic.go:334] "Generic (PLEG): container finished" podID="acd74efa-e542-450a-b7c9-91eee744d0e9" containerID="2f658df34978904e13580e51db56e05c697354be4c6236207c7142d839d2514d" exitCode=0 Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.222346 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-e6d7-account-create-bxc77" event={"ID":"acd74efa-e542-450a-b7c9-91eee744d0e9","Type":"ContainerDied","Data":"2f658df34978904e13580e51db56e05c697354be4c6236207c7142d839d2514d"} Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.226935 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"d18659662154bb89d7743ad0a93dfc417901989a4f63b20f08568eb7129799f4"} Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.275936 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-47b95-config-dsd9s"] Oct 13 07:03:02 crc kubenswrapper[4664]: E1013 07:03:02.278135 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa756df-de77-404f-b164-d668cd3a544b" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.278257 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa756df-de77-404f-b164-d668cd3a544b" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: E1013 07:03:02.278353 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcbed96d-d57c-44ce-98ad-9a17f7579163" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.278562 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcbed96d-d57c-44ce-98ad-9a17f7579163" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: E1013 07:03:02.278649 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e45296-e54a-4563-9c44-6cdc4e2e5640" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.278729 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e45296-e54a-4563-9c44-6cdc4e2e5640" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: E1013 07:03:02.278834 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5947d238-8eb8-407b-a497-407d7cca98a1" containerName="ovn-config" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.278917 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5947d238-8eb8-407b-a497-407d7cca98a1" containerName="ovn-config" Oct 13 07:03:02 crc kubenswrapper[4664]: E1013 07:03:02.278993 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d06993-8fe8-4f18-842b-a989f5f9c95c" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.279072 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d06993-8fe8-4f18-842b-a989f5f9c95c" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.279344 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5947d238-8eb8-407b-a497-407d7cca98a1" containerName="ovn-config" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.279438 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b9e45296-e54a-4563-9c44-6cdc4e2e5640" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.282495 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa756df-de77-404f-b164-d668cd3a544b" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.282613 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d06993-8fe8-4f18-842b-a989f5f9c95c" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.282670 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcbed96d-d57c-44ce-98ad-9a17f7579163" containerName="mariadb-database-create" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.283329 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.283996 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-dsd9s"] Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.286816 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.363822 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.363887 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.363929 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.363965 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.364019 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.364044 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4krz\" (UniqueName: 
\"kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465386 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465460 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465497 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465549 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465570 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4krz\" (UniqueName: \"kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465604 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465722 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465721 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.465758 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.466198 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.467623 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.484144 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4krz\" (UniqueName: \"kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz\") pod \"ovn-controller-47b95-config-dsd9s\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:02 crc kubenswrapper[4664]: I1013 07:03:02.604345 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:03 crc kubenswrapper[4664]: I1013 07:03:03.066348 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5947d238-8eb8-407b-a497-407d7cca98a1" path="/var/lib/kubelet/pods/5947d238-8eb8-407b-a497-407d7cca98a1/volumes" Oct 13 07:03:03 crc kubenswrapper[4664]: I1013 07:03:03.133495 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-47b95-config-dsd9s"] Oct 13 07:03:03 crc kubenswrapper[4664]: I1013 07:03:03.238635 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"557915ec87fb3063d9ae3f603a191a4efe68ce498fad25d4860493039f3ea00e"} Oct 13 07:03:03 crc kubenswrapper[4664]: I1013 07:03:03.238687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"5d22dc4c6ba6f0d3fdbf4b1c2c4e80fdcfecb663b0339a98227c73a0584e02c5"} Oct 13 07:03:03 crc kubenswrapper[4664]: I1013 07:03:03.238704 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"8741834c94e3d13b7f63718a2cf5237d9dff9432951208fa6927d85743942b7b"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:03.595652 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:03.682619 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wsrm\" (UniqueName: \"kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm\") pod \"acd74efa-e542-450a-b7c9-91eee744d0e9\" (UID: \"acd74efa-e542-450a-b7c9-91eee744d0e9\") " Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:03.690981 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm" (OuterVolumeSpecName: "kube-api-access-6wsrm") pod "acd74efa-e542-450a-b7c9-91eee744d0e9" (UID: "acd74efa-e542-450a-b7c9-91eee744d0e9"). InnerVolumeSpecName "kube-api-access-6wsrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:03.785066 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wsrm\" (UniqueName: \"kubernetes.io/projected/acd74efa-e542-450a-b7c9-91eee744d0e9-kube-api-access-6wsrm\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.204254 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.257975 4664 generic.go:334] "Generic (PLEG): container finished" podID="73309eda-6c6d-4020-8596-615f46e073f9" containerID="05f88fb8ca570b9d330a6650f1b32924384538b447ef0afbf12442e2a18233db" exitCode=0 Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.259045 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-dsd9s" event={"ID":"73309eda-6c6d-4020-8596-615f46e073f9","Type":"ContainerDied","Data":"05f88fb8ca570b9d330a6650f1b32924384538b447ef0afbf12442e2a18233db"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.259146 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-dsd9s" event={"ID":"73309eda-6c6d-4020-8596-615f46e073f9","Type":"ContainerStarted","Data":"de066429716c34fdb1622efedccfaec6883d219de15a27e715b76ebd637e3e86"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.266406 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"47e8f1f8c8a87dff6ff7f5abaec4d69a32f173e777aeabd41e49d87c3f411fa9"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.266453 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"8ac231cd776dc8663b08d81e6ec848a6a89562a39f702cc000a893f18121de69"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.266462 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"0bfba1c26dd5a8119dc0a1a0e9bf0eb5353bb7a766c8fb15c1a504b2bdcf2944"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.266470 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"aa019b6f13c418b30ef924d1f16bb42d878d725c64b5c8b779f1b47545e42207"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.271077 4664 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0a22-account-create-59l9x" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.271003 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0a22-account-create-59l9x" event={"ID":"34827b2f-cc99-4948-868a-be9095deadb1","Type":"ContainerDied","Data":"e659f586cbf2daede9ae8b2205ed7f2d634acec87aec5ad32ae1c3ca616d1a66"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.271351 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e659f586cbf2daede9ae8b2205ed7f2d634acec87aec5ad32ae1c3ca616d1a66" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.279203 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-e6d7-account-create-bxc77" event={"ID":"acd74efa-e542-450a-b7c9-91eee744d0e9","Type":"ContainerDied","Data":"78fdd346bab8a6ff20ef00d8fac20e29362cb430e4f2e2d5deb7207dd647f347"} Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.279242 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78fdd346bab8a6ff20ef00d8fac20e29362cb430e4f2e2d5deb7207dd647f347" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.279286 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-e6d7-account-create-bxc77" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.296819 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9hp4\" (UniqueName: \"kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4\") pod \"34827b2f-cc99-4948-868a-be9095deadb1\" (UID: \"34827b2f-cc99-4948-868a-be9095deadb1\") " Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.301925 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4" (OuterVolumeSpecName: "kube-api-access-j9hp4") pod "34827b2f-cc99-4948-868a-be9095deadb1" (UID: "34827b2f-cc99-4948-868a-be9095deadb1"). InnerVolumeSpecName "kube-api-access-j9hp4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:04 crc kubenswrapper[4664]: I1013 07:03:04.398735 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9hp4\" (UniqueName: \"kubernetes.io/projected/34827b2f-cc99-4948-868a-be9095deadb1-kube-api-access-j9hp4\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.342921 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"bf4c39bf5aee183a89c6ba685ba1f6c5db0255c9d21c5161047d5a7ade4e375b"} Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.343437 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"1a056d74ab2440116a10a6d2c7ef8fae2b21bb8d2c910473ebe70565f0310b0c"} Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.419988 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-3913-account-create-zcqth"] Oct 13 07:03:05 crc kubenswrapper[4664]: E1013 07:03:05.420597 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acd74efa-e542-450a-b7c9-91eee744d0e9" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.420618 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="acd74efa-e542-450a-b7c9-91eee744d0e9" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: E1013 07:03:05.420634 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34827b2f-cc99-4948-868a-be9095deadb1" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.420641 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="34827b2f-cc99-4948-868a-be9095deadb1" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.420863 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="34827b2f-cc99-4948-868a-be9095deadb1" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.420897 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="acd74efa-e542-450a-b7c9-91eee744d0e9" containerName="mariadb-account-create" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.421485 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3913-account-create-zcqth" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.432258 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.434395 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3913-account-create-zcqth"] Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.511549 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-bpmkp"] Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.512849 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.516544 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bf6x\" (UniqueName: \"kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x\") pod \"glance-3913-account-create-zcqth\" (UID: \"954b0067-d3cf-4f1d-a744-e748779c4422\") " pod="openstack/glance-3913-account-create-zcqth" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.520482 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.520595 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kssk7" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.520785 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.525836 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-bpmkp"] Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.529351 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.618098 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.618183 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bf6x\" (UniqueName: \"kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x\") pod \"glance-3913-account-create-zcqth\" (UID: \"954b0067-d3cf-4f1d-a744-e748779c4422\") " pod="openstack/glance-3913-account-create-zcqth" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.618243 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.618268 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cttd\" (UniqueName: \"kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.668093 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bf6x\" (UniqueName: \"kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x\") pod \"glance-3913-account-create-zcqth\" (UID: \"954b0067-d3cf-4f1d-a744-e748779c4422\") " pod="openstack/glance-3913-account-create-zcqth" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.719882 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.719975 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.720004 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cttd\" (UniqueName: \"kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.724658 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.726688 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.749531 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cttd\" (UniqueName: \"kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd\") pod \"keystone-db-sync-bpmkp\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") " pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.792830 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3913-account-create-zcqth" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.836190 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-bpmkp" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.854711 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-47b95-config-dsd9s" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933324 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933364 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933382 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933486 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4krz\" (UniqueName: \"kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933523 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933587 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn\") pod \"73309eda-6c6d-4020-8596-615f46e073f9\" (UID: \"73309eda-6c6d-4020-8596-615f46e073f9\") " Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933965 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.933995 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run" (OuterVolumeSpecName: "var-run") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.934247 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.934584 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.934970 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts" (OuterVolumeSpecName: "scripts") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:05 crc kubenswrapper[4664]: I1013 07:03:05.941261 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz" (OuterVolumeSpecName: "kube-api-access-v4krz") pod "73309eda-6c6d-4020-8596-615f46e073f9" (UID: "73309eda-6c6d-4020-8596-615f46e073f9"). InnerVolumeSpecName "kube-api-access-v4krz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035109 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4krz\" (UniqueName: \"kubernetes.io/projected/73309eda-6c6d-4020-8596-615f46e073f9-kube-api-access-v4krz\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035354 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035368 4664 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035379 4664 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/73309eda-6c6d-4020-8596-615f46e073f9-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035388 4664 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.035395 4664 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/73309eda-6c6d-4020-8596-615f46e073f9-var-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.320827 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-bpmkp"] Oct 13 07:03:06 crc kubenswrapper[4664]: W1013 07:03:06.337986 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21047ce5_4bdf_4660_a953_ab87a8e5e5e1.slice/crio-2a1eec921bbeadc406929920fd61a2474c551b98782162fea6f5268816a074a5 WatchSource:0}: Error finding container 
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.339274 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3913-account-create-zcqth"]
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.356457 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-47b95-config-dsd9s" event={"ID":"73309eda-6c6d-4020-8596-615f46e073f9","Type":"ContainerDied","Data":"de066429716c34fdb1622efedccfaec6883d219de15a27e715b76ebd637e3e86"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.356490 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-47b95-config-dsd9s"
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.356502 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de066429716c34fdb1622efedccfaec6883d219de15a27e715b76ebd637e3e86"
Oct 13 07:03:06 crc kubenswrapper[4664]: W1013 07:03:06.360053 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod954b0067_d3cf_4f1d_a744_e748779c4422.slice/crio-fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c WatchSource:0}: Error finding container fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c: Status 404 returned error can't find the container with id fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.377250 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"7454cd2d938c043921260b0a5e637e92414d475bbbe95d48655fe03aaafe5645"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.377296 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"177c29a07d53c8825c85be6a4f936b59106fa9fd95c711422889301a917c32eb"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.377310 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"3b9cc29bfa621468cb2e0aebd85584adb3be749049da89e72d95c63182eaede8"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.377320 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"4c7a86254eaa664a12492ec4fe122b44a29352798216a703ba384b81a7dbb711"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.380031 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bpmkp" event={"ID":"21047ce5-4bdf-4660-a953-ab87a8e5e5e1","Type":"ContainerStarted","Data":"2a1eec921bbeadc406929920fd61a2474c551b98782162fea6f5268816a074a5"}
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.944212 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-47b95-config-dsd9s"]
Oct 13 07:03:06 crc kubenswrapper[4664]: I1013 07:03:06.956646 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-47b95-config-dsd9s"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.001353 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1f27-account-create-rspbf"]
Oct 13 07:03:07 crc kubenswrapper[4664]: E1013 07:03:07.001696 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73309eda-6c6d-4020-8596-615f46e073f9" containerName="ovn-config"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.001719 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="73309eda-6c6d-4020-8596-615f46e073f9" containerName="ovn-config"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.001932 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="73309eda-6c6d-4020-8596-615f46e073f9" containerName="ovn-config"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.002471 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.004551 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.013570 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1f27-account-create-rspbf"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.055517 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73309eda-6c6d-4020-8596-615f46e073f9" path="/var/lib/kubelet/pods/73309eda-6c6d-4020-8596-615f46e073f9/volumes"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.055663 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42lwd\" (UniqueName: \"kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd\") pod \"barbican-1f27-account-create-rspbf\" (UID: \"ca5c6d84-14c0-410b-9e05-610f875bdb0a\") " pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.156904 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42lwd\" (UniqueName: \"kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd\") pod \"barbican-1f27-account-create-rspbf\" (UID: \"ca5c6d84-14c0-410b-9e05-610f875bdb0a\") " pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.180115 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42lwd\" (UniqueName: \"kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd\") pod \"barbican-1f27-account-create-rspbf\" (UID: \"ca5c6d84-14c0-410b-9e05-610f875bdb0a\") " pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.203898 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-3aef-account-create-psnmm"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.204880 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.207891 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.216662 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-3aef-account-create-psnmm"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.258698 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2sxr\" (UniqueName: \"kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr\") pod \"heat-3aef-account-create-psnmm\" (UID: \"6ac9ed1e-0ef3-48b5-be63-a75276e406e7\") " pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.305685 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-0861-account-create-vbdml"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.307695 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.311177 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.314246 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0861-account-create-vbdml"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.320268 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.360232 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bckm5\" (UniqueName: \"kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5\") pod \"cinder-0861-account-create-vbdml\" (UID: \"6764cf07-0dd7-4ce1-8ab9-684feaae4aec\") " pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.360335 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2sxr\" (UniqueName: \"kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr\") pod \"heat-3aef-account-create-psnmm\" (UID: \"6ac9ed1e-0ef3-48b5-be63-a75276e406e7\") " pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.384959 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2sxr\" (UniqueName: \"kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr\") pod \"heat-3aef-account-create-psnmm\" (UID: \"6ac9ed1e-0ef3-48b5-be63-a75276e406e7\") " pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.444093 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"9953b8b0-42bc-4192-8791-d0564fa27f10","Type":"ContainerStarted","Data":"69a8cf4c608665262e28716dec952ece5e9f39747baf8e95d498d163c5665bad"}
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.455709 4664 generic.go:334] "Generic (PLEG): container finished" podID="954b0067-d3cf-4f1d-a744-e748779c4422" containerID="a98a1492280263b06e8dffc642d112c6dce58cc3cc13d8c1e6326bf5f2833866" exitCode=0
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.455751 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3913-account-create-zcqth" event={"ID":"954b0067-d3cf-4f1d-a744-e748779c4422","Type":"ContainerDied","Data":"a98a1492280263b06e8dffc642d112c6dce58cc3cc13d8c1e6326bf5f2833866"}
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.455776 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3913-account-create-zcqth" event={"ID":"954b0067-d3cf-4f1d-a744-e748779c4422","Type":"ContainerStarted","Data":"fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c"}
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.464940 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bckm5\" (UniqueName: \"kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5\") pod \"cinder-0861-account-create-vbdml\" (UID: \"6764cf07-0dd7-4ce1-8ab9-684feaae4aec\") " pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.480319 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bckm5\" (UniqueName: \"kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5\") pod \"cinder-0861-account-create-vbdml\" (UID: \"6764cf07-0dd7-4ce1-8ab9-684feaae4aec\") " pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.492094 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.616795124 podStartE2EDuration="25.492077107s" podCreationTimestamp="2025-10-13 07:02:42 +0000 UTC" firstStartedPulling="2025-10-13 07:02:59.997230444 +0000 UTC m=+987.684675636" lastFinishedPulling="2025-10-13 07:03:04.872512427 +0000 UTC m=+992.559957619" observedRunningTime="2025-10-13 07:03:07.488756657 +0000 UTC m=+995.176201849" watchObservedRunningTime="2025-10-13 07:03:07.492077107 +0000 UTC m=+995.179522299"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.535320 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.601577 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-fcae-account-create-2zt28"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.605265 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.609787 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.620091 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fcae-account-create-2zt28"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.633600 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.701478 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hwsr\" (UniqueName: \"kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr\") pod \"neutron-fcae-account-create-2zt28\" (UID: \"d02b8efa-0c69-4122-a4d1-811d06cf6ac1\") " pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.769885 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.775976 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.788654 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.803876 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hwsr\" (UniqueName: \"kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr\") pod \"neutron-fcae-account-create-2zt28\" (UID: \"d02b8efa-0c69-4122-a4d1-811d06cf6ac1\") " pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.810716 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.823703 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1f27-account-create-rspbf"]
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.843407 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hwsr\" (UniqueName: \"kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr\") pod \"neutron-fcae-account-create-2zt28\" (UID: \"d02b8efa-0c69-4122-a4d1-811d06cf6ac1\") " pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.911889 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.911995 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.912018 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crztk\" (UniqueName: \"kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.912074 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.912407 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.912457 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:07 crc kubenswrapper[4664]: I1013 07:03:07.933815 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.013940 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.013993 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crztk\" (UniqueName: \"kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.014032 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.014090 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.014116 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.014155 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.016313 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.016917 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.018625 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.019738 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.019832 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.033966 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crztk\" (UniqueName: \"kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk\") pod \"dnsmasq-dns-5dcf8755f-v8kgn\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.072762 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-3aef-account-create-psnmm"]
Oct 13 07:03:08 crc kubenswrapper[4664]: W1013 07:03:08.082690 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ac9ed1e_0ef3_48b5_be63_a75276e406e7.slice/crio-8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d WatchSource:0}: Error finding container 8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d: Status 404 returned error can't find the container with id 8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.114540 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.244286 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0861-account-create-vbdml"]
Oct 13 07:03:08 crc kubenswrapper[4664]: E1013 07:03:08.380603 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca5c6d84_14c0_410b_9e05_610f875bdb0a.slice/crio-4607bc3d98a8b4a62a54bc5cbc1cfafd39b8638631c628907e695f332877e1b7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca5c6d84_14c0_410b_9e05_610f875bdb0a.slice/crio-conmon-4607bc3d98a8b4a62a54bc5cbc1cfafd39b8638631c628907e695f332877e1b7.scope\": RecentStats: unable to find data in memory cache]"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.385932 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fcae-account-create-2zt28"]
Oct 13 07:03:08 crc kubenswrapper[4664]: W1013 07:03:08.397423 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd02b8efa_0c69_4122_a4d1_811d06cf6ac1.slice/crio-0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c WatchSource:0}: Error finding container 0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c: Status 404 returned error can't find the container with id 0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.467587 4664 generic.go:334] "Generic (PLEG): container finished" podID="6ac9ed1e-0ef3-48b5-be63-a75276e406e7" containerID="f3f1f0a7aa4f371c4cd8dd2628174b3188091716d8623ffcc23692a3680015a6" exitCode=0
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.467884 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-3aef-account-create-psnmm" event={"ID":"6ac9ed1e-0ef3-48b5-be63-a75276e406e7","Type":"ContainerDied","Data":"f3f1f0a7aa4f371c4cd8dd2628174b3188091716d8623ffcc23692a3680015a6"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.468944 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-3aef-account-create-psnmm" event={"ID":"6ac9ed1e-0ef3-48b5-be63-a75276e406e7","Type":"ContainerStarted","Data":"8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.471763 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fcae-account-create-2zt28" event={"ID":"d02b8efa-0c69-4122-a4d1-811d06cf6ac1","Type":"ContainerStarted","Data":"0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.473590 4664 generic.go:334] "Generic (PLEG): container finished" podID="ca5c6d84-14c0-410b-9e05-610f875bdb0a" containerID="4607bc3d98a8b4a62a54bc5cbc1cfafd39b8638631c628907e695f332877e1b7" exitCode=0
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.473650 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1f27-account-create-rspbf" event={"ID":"ca5c6d84-14c0-410b-9e05-610f875bdb0a","Type":"ContainerDied","Data":"4607bc3d98a8b4a62a54bc5cbc1cfafd39b8638631c628907e695f332877e1b7"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.473688 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1f27-account-create-rspbf" event={"ID":"ca5c6d84-14c0-410b-9e05-610f875bdb0a","Type":"ContainerStarted","Data":"3024d6d2c9f8b9480275bb92611c4f38639e6df458e181cc1fdaacaba475319c"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.476716 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0861-account-create-vbdml" event={"ID":"6764cf07-0dd7-4ce1-8ab9-684feaae4aec","Type":"ContainerStarted","Data":"bb8e6740646b74ec4c91aada60f88d7b56c9fe853acbfa714ce5995d7c9872d2"}
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.507711 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-0861-account-create-vbdml" podStartSLOduration=1.5076922430000002 podStartE2EDuration="1.507692243s" podCreationTimestamp="2025-10-13 07:03:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:03:08.50047481 +0000 UTC m=+996.187920002" watchObservedRunningTime="2025-10-13 07:03:08.507692243 +0000 UTC m=+996.195137435"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.592538 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"]
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.820293 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3913-account-create-zcqth"
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.938215 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bf6x\" (UniqueName: \"kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x\") pod \"954b0067-d3cf-4f1d-a744-e748779c4422\" (UID: \"954b0067-d3cf-4f1d-a744-e748779c4422\") "
Oct 13 07:03:08 crc kubenswrapper[4664]: I1013 07:03:08.942328 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x" (OuterVolumeSpecName: "kube-api-access-9bf6x") pod "954b0067-d3cf-4f1d-a744-e748779c4422" (UID: "954b0067-d3cf-4f1d-a744-e748779c4422"). InnerVolumeSpecName "kube-api-access-9bf6x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.039859 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bf6x\" (UniqueName: \"kubernetes.io/projected/954b0067-d3cf-4f1d-a744-e748779c4422-kube-api-access-9bf6x\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.486527 4664 generic.go:334] "Generic (PLEG): container finished" podID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerID="b4c7ddf2116a9f856cb9871fcf1a383f52ea39bb34d56d2431a0d2a37c32ab21" exitCode=0
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.486836 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" event={"ID":"caee2c49-8d8b-4df3-86b6-424791c35ff5","Type":"ContainerDied","Data":"b4c7ddf2116a9f856cb9871fcf1a383f52ea39bb34d56d2431a0d2a37c32ab21"}
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.486903 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" event={"ID":"caee2c49-8d8b-4df3-86b6-424791c35ff5","Type":"ContainerStarted","Data":"e5aef54ec951912e8a66e05aebc80911713482c728b89e3e90ab3c57ec836ab8"}
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.489477 4664 generic.go:334] "Generic (PLEG): container finished" podID="6764cf07-0dd7-4ce1-8ab9-684feaae4aec" containerID="e773dbc49f3028c8c43252c120362347f33d0de3c68ae82615d85272ca0bf6e6" exitCode=0
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.489546 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0861-account-create-vbdml" event={"ID":"6764cf07-0dd7-4ce1-8ab9-684feaae4aec","Type":"ContainerDied","Data":"e773dbc49f3028c8c43252c120362347f33d0de3c68ae82615d85272ca0bf6e6"}
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.491100 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3913-account-create-zcqth" event={"ID":"954b0067-d3cf-4f1d-a744-e748779c4422","Type":"ContainerDied","Data":"fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c"}
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.491143 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc0d249e3837cb9669f4ab48e0b690f535e651b338d7b308346e8a75cd50b10c"
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.491211 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3913-account-create-zcqth"
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.496183 4664 generic.go:334] "Generic (PLEG): container finished" podID="d02b8efa-0c69-4122-a4d1-811d06cf6ac1" containerID="05cad278c964d0f77e8f47b9d9368c7cbeb3cd0dc88a3772db0bfaff99a1cf3f" exitCode=0
Oct 13 07:03:09 crc kubenswrapper[4664]: I1013 07:03:09.496241 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fcae-account-create-2zt28" event={"ID":"d02b8efa-0c69-4122-a4d1-811d06cf6ac1","Type":"ContainerDied","Data":"05cad278c964d0f77e8f47b9d9368c7cbeb3cd0dc88a3772db0bfaff99a1cf3f"}
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.604376 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-7nsnc"]
Oct 13 07:03:10 crc kubenswrapper[4664]: E1013 07:03:10.605845 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="954b0067-d3cf-4f1d-a744-e748779c4422" containerName="mariadb-account-create"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.605871 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="954b0067-d3cf-4f1d-a744-e748779c4422" containerName="mariadb-account-create"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.606079 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="954b0067-d3cf-4f1d-a744-e748779c4422" containerName="mariadb-account-create"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.607621 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.610681 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.610978 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rbmdn"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.636007 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7nsnc"]
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.672492 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.672944 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.672972 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkg74\" (UniqueName: \"kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.673056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.774077 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.774137 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkg74\" (UniqueName: \"kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.774202 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.774339 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.801557 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.803407 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.831634 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-config-data\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.836109 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkg74\" (UniqueName: \"kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74\") pod \"glance-db-sync-7nsnc\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:10 crc kubenswrapper[4664]: I1013 07:03:10.944190 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7nsnc"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.545406 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-3aef-account-create-psnmm" event={"ID":"6ac9ed1e-0ef3-48b5-be63-a75276e406e7","Type":"ContainerDied","Data":"8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d"}
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.545891 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8607c5ce0d640cfe11b3853886529ab8ce9693a88940c9227ac81e43db1d389d"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.550013 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fcae-account-create-2zt28" event={"ID":"d02b8efa-0c69-4122-a4d1-811d06cf6ac1","Type":"ContainerDied","Data":"0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c"}
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.550041 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b6dbc4cd4bd36454500a08b47e00bec1929089b719f3cd1739652fba9fa3c6c"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.552210 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1f27-account-create-rspbf" event={"ID":"ca5c6d84-14c0-410b-9e05-610f875bdb0a","Type":"ContainerDied","Data":"3024d6d2c9f8b9480275bb92611c4f38639e6df458e181cc1fdaacaba475319c"}
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.552227 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3024d6d2c9f8b9480275bb92611c4f38639e6df458e181cc1fdaacaba475319c"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.558422 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0861-account-create-vbdml" event={"ID":"6764cf07-0dd7-4ce1-8ab9-684feaae4aec","Type":"ContainerDied","Data":"bb8e6740646b74ec4c91aada60f88d7b56c9fe853acbfa714ce5995d7c9872d2"}
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.558443 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb8e6740646b74ec4c91aada60f88d7b56c9fe853acbfa714ce5995d7c9872d2"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.623778 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.633730 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.662009 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.662595 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.715051 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hwsr\" (UniqueName: \"kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr\") pod \"d02b8efa-0c69-4122-a4d1-811d06cf6ac1\" (UID: \"d02b8efa-0c69-4122-a4d1-811d06cf6ac1\") "
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.715144 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42lwd\" (UniqueName: \"kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd\") pod \"ca5c6d84-14c0-410b-9e05-610f875bdb0a\" (UID: \"ca5c6d84-14c0-410b-9e05-610f875bdb0a\") "
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.715179 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2sxr\" (UniqueName: \"kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr\") pod \"6ac9ed1e-0ef3-48b5-be63-a75276e406e7\" (UID: \"6ac9ed1e-0ef3-48b5-be63-a75276e406e7\") "
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.715231 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bckm5\" (UniqueName: \"kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5\") pod \"6764cf07-0dd7-4ce1-8ab9-684feaae4aec\" (UID: \"6764cf07-0dd7-4ce1-8ab9-684feaae4aec\") "
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.737216 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr" (OuterVolumeSpecName: "kube-api-access-9hwsr") pod "d02b8efa-0c69-4122-a4d1-811d06cf6ac1" (UID: "d02b8efa-0c69-4122-a4d1-811d06cf6ac1"). InnerVolumeSpecName "kube-api-access-9hwsr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.737490 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5" (OuterVolumeSpecName: "kube-api-access-bckm5") pod "6764cf07-0dd7-4ce1-8ab9-684feaae4aec" (UID: "6764cf07-0dd7-4ce1-8ab9-684feaae4aec"). InnerVolumeSpecName "kube-api-access-bckm5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.737573 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd" (OuterVolumeSpecName: "kube-api-access-42lwd") pod "ca5c6d84-14c0-410b-9e05-610f875bdb0a" (UID: "ca5c6d84-14c0-410b-9e05-610f875bdb0a"). InnerVolumeSpecName "kube-api-access-42lwd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.737702 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr" (OuterVolumeSpecName: "kube-api-access-b2sxr") pod "6ac9ed1e-0ef3-48b5-be63-a75276e406e7" (UID: "6ac9ed1e-0ef3-48b5-be63-a75276e406e7"). InnerVolumeSpecName "kube-api-access-b2sxr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.817505 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42lwd\" (UniqueName: \"kubernetes.io/projected/ca5c6d84-14c0-410b-9e05-610f875bdb0a-kube-api-access-42lwd\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.817537 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2sxr\" (UniqueName: \"kubernetes.io/projected/6ac9ed1e-0ef3-48b5-be63-a75276e406e7-kube-api-access-b2sxr\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.817550 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bckm5\" (UniqueName: \"kubernetes.io/projected/6764cf07-0dd7-4ce1-8ab9-684feaae4aec-kube-api-access-bckm5\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:12 crc kubenswrapper[4664]: I1013 07:03:12.817560 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hwsr\" (UniqueName: \"kubernetes.io/projected/d02b8efa-0c69-4122-a4d1-811d06cf6ac1-kube-api-access-9hwsr\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.102149 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7nsnc"]
Oct 13 07:03:13 crc kubenswrapper[4664]: W1013 07:03:13.103898 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ca51385_fe74_4d5b_a542_f33734fb8e46.slice/crio-bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8 WatchSource:0}: Error finding container bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8: Status 404 returned error can't find the container with id bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.570222 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bpmkp" event={"ID":"21047ce5-4bdf-4660-a953-ab87a8e5e5e1","Type":"ContainerStarted","Data":"602abac067ce7d01f36bd018b9499cbae893c782189358d70f04166abee94494"}
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.572757 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" event={"ID":"caee2c49-8d8b-4df3-86b6-424791c35ff5","Type":"ContainerStarted","Data":"7bee5e3b29e756f099e44fabfd82181457af418fb8c8c30ff1af9550520ee06b"}
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.572924 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.574634 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fcae-account-create-2zt28"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.575938 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7nsnc" event={"ID":"0ca51385-fe74-4d5b-a542-f33734fb8e46","Type":"ContainerStarted","Data":"bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8"}
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.576061 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-3aef-account-create-psnmm"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.576922 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1f27-account-create-rspbf"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.578510 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0861-account-create-vbdml"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.589638 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-bpmkp" podStartSLOduration=2.417954185 podStartE2EDuration="8.5896177s" podCreationTimestamp="2025-10-13 07:03:05 +0000 UTC" firstStartedPulling="2025-10-13 07:03:06.344174483 +0000 UTC m=+994.031619675" lastFinishedPulling="2025-10-13 07:03:12.515837998 +0000 UTC m=+1000.203283190" observedRunningTime="2025-10-13 07:03:13.589353653 +0000 UTC m=+1001.276798845" watchObservedRunningTime="2025-10-13 07:03:13.5896177 +0000 UTC m=+1001.277062912"
Oct 13 07:03:13 crc kubenswrapper[4664]: I1013 07:03:13.613923 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" podStartSLOduration=6.613908899 podStartE2EDuration="6.613908899s" podCreationTimestamp="2025-10-13 07:03:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:03:13.608212206 +0000 UTC m=+1001.295657428" watchObservedRunningTime="2025-10-13 07:03:13.613908899 +0000 UTC m=+1001.301354091"
Oct 13 07:03:15 crc kubenswrapper[4664]: I1013 07:03:15.598839 4664 generic.go:334] "Generic (PLEG): container finished" podID="21047ce5-4bdf-4660-a953-ab87a8e5e5e1" containerID="602abac067ce7d01f36bd018b9499cbae893c782189358d70f04166abee94494" exitCode=0
Oct 13 07:03:15 crc kubenswrapper[4664]: I1013 07:03:15.598942 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bpmkp" event={"ID":"21047ce5-4bdf-4660-a953-ab87a8e5e5e1","Type":"ContainerDied","Data":"602abac067ce7d01f36bd018b9499cbae893c782189358d70f04166abee94494"}
Oct 13 07:03:16 crc kubenswrapper[4664]: I1013 07:03:16.911992 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-bpmkp"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.080477 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cttd\" (UniqueName: \"kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd\") pod \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") "
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.080638 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data\") pod \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") "
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.080890 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle\") pod \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\" (UID: \"21047ce5-4bdf-4660-a953-ab87a8e5e5e1\") "
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.092032 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd" (OuterVolumeSpecName: "kube-api-access-7cttd") pod "21047ce5-4bdf-4660-a953-ab87a8e5e5e1" (UID: "21047ce5-4bdf-4660-a953-ab87a8e5e5e1"). InnerVolumeSpecName "kube-api-access-7cttd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.118499 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21047ce5-4bdf-4660-a953-ab87a8e5e5e1" (UID: "21047ce5-4bdf-4660-a953-ab87a8e5e5e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.142574 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data" (OuterVolumeSpecName: "config-data") pod "21047ce5-4bdf-4660-a953-ab87a8e5e5e1" (UID: "21047ce5-4bdf-4660-a953-ab87a8e5e5e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.189081 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cttd\" (UniqueName: \"kubernetes.io/projected/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-kube-api-access-7cttd\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.189183 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-config-data\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.189246 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21047ce5-4bdf-4660-a953-ab87a8e5e5e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.615775 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bpmkp" event={"ID":"21047ce5-4bdf-4660-a953-ab87a8e5e5e1","Type":"ContainerDied","Data":"2a1eec921bbeadc406929920fd61a2474c551b98782162fea6f5268816a074a5"}
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.615837 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a1eec921bbeadc406929920fd61a2474c551b98782162fea6f5268816a074a5"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.615900 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-bpmkp"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.852099 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"]
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.852394 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="dnsmasq-dns" containerID="cri-o://7bee5e3b29e756f099e44fabfd82181457af418fb8c8c30ff1af9550520ee06b" gracePeriod=10
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.876912 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.946899 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"]
Oct 13 07:03:17 crc kubenswrapper[4664]: E1013 07:03:17.947268 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21047ce5-4bdf-4660-a953-ab87a8e5e5e1" containerName="keystone-db-sync"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.947280 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="21047ce5-4bdf-4660-a953-ab87a8e5e5e1" containerName="keystone-db-sync"
Oct 13 07:03:17 crc kubenswrapper[4664]: E1013 07:03:17.947397 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ac9ed1e-0ef3-48b5-be63-a75276e406e7" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.947404 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ac9ed1e-0ef3-48b5-be63-a75276e406e7" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: E1013 07:03:17.947415 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6764cf07-0dd7-4ce1-8ab9-684feaae4aec" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.947421 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6764cf07-0dd7-4ce1-8ab9-684feaae4aec" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: E1013 07:03:17.947433 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5c6d84-14c0-410b-9e05-610f875bdb0a" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.947438 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5c6d84-14c0-410b-9e05-610f875bdb0a" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: E1013 07:03:17.947466 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02b8efa-0c69-4122-a4d1-811d06cf6ac1" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.947472 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02b8efa-0c69-4122-a4d1-811d06cf6ac1" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.948494 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="d02b8efa-0c69-4122-a4d1-811d06cf6ac1" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.948518 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca5c6d84-14c0-410b-9e05-610f875bdb0a" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.948531 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ac9ed1e-0ef3-48b5-be63-a75276e406e7" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.948542 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="21047ce5-4bdf-4660-a953-ab87a8e5e5e1" containerName="keystone-db-sync"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.948551 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6764cf07-0dd7-4ce1-8ab9-684feaae4aec" containerName="mariadb-account-create"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.949464 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.959847 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"]
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.974127 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dgpx8"]
Oct 13 07:03:17 crc kubenswrapper[4664]: I1013 07:03:17.975464 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:17.994235 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:17.994421 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kssk7"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:17.994573 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:17.994676 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.034730 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dgpx8"]
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.104120 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-2x2fr"]
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.105217 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2x2fr"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.113402 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-vc7gh"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114365 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114469 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114516 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114533 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114548 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114568 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114590 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114611 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114630 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt87d\" (UniqueName: \"kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114665 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114691 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4c9t\" (UniqueName: \"kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114728 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.114744 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.118270 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused"
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.163914 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-2x2fr"]
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.210905 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-jbdfp"]
Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.212175 4664 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217577 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217630 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4c9t\" (UniqueName: \"kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217667 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217690 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217723 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217740 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217789 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217837 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217852 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc 
kubenswrapper[4664]: I1013 07:03:18.217865 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217886 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217918 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217938 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rh476\" (UniqueName: \"kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217971 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.217991 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt87d\" (UniqueName: \"kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.218646 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sqpcs" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.218815 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.218936 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.220676 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.226406 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc 
kubenswrapper[4664]: I1013 07:03:18.227728 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.232901 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.234266 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.234769 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.246622 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.246955 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.249040 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.249863 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.268381 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jbdfp"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.277207 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4c9t\" (UniqueName: \"kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t\") pod \"keystone-bootstrap-dgpx8\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 
07:03:18.300452 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt87d\" (UniqueName: \"kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d\") pod \"dnsmasq-dns-84c8b5579c-gl2vc\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319014 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319055 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319124 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzmkq\" (UniqueName: \"kubernetes.io/projected/0b2b826f-7b2a-4de6-9f80-7c854b988a67-kube-api-access-fzmkq\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319144 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319164 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319193 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319220 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rh476\" (UniqueName: \"kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319241 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.319276 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.328979 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.332906 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.359688 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.361127 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.362062 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.368261 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-6xkml" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.368446 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.368527 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.368649 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.374715 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.395788 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.410811 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.410916 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.416494 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.416683 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rh476\" (UniqueName: \"kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476\") pod \"heat-db-sync-2x2fr\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.421534 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427609 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427706 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzmkq\" (UniqueName: \"kubernetes.io/projected/0b2b826f-7b2a-4de6-9f80-7c854b988a67-kube-api-access-fzmkq\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427726 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427744 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427773 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.427812 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.430967 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.431025 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.437887 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2x2fr" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.450396 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.475672 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.496240 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.500218 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534225 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534307 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534332 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534427 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26mvb\" (UniqueName: \"kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534571 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 
07:03:18.534629 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534650 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534670 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534686 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534750 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqjvj\" (UniqueName: \"kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534782 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.534837 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.535421 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzmkq\" (UniqueName: \"kubernetes.io/projected/0b2b826f-7b2a-4de6-9f80-7c854b988a67-kube-api-access-fzmkq\") pod \"cinder-db-sync-jbdfp\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.553469 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-nhrg9"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.566435 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.569749 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.569839 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hv5d5" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.569749 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.571341 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-g6lvp"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.576496 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.582872 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.583097 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2p8n2" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.604319 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nhrg9"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636489 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqjvj\" (UniqueName: \"kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636530 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636558 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636590 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636610 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636628 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs\") pod \"placement-db-sync-nhrg9\" (UID: 
\"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636644 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636670 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djxtr\" (UniqueName: \"kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636692 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636721 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26mvb\" (UniqueName: \"kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636746 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636784 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636824 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636838 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636857 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636872 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.636892 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.639648 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.640256 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.642117 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.642339 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.645657 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.648423 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.649204 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.649932 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.650867 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.660008 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.671240 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-qt7sf"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.672561 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.676570 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.689618 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g6lvp"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.700228 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26mvb\" (UniqueName: \"kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb\") pod \"horizon-757c9cbd7-c6dbp\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.700738 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.700819 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.701377 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-t6k2k" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.702526 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.706157 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqjvj\" (UniqueName: \"kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj\") pod \"ceilometer-0\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.709250 4664 generic.go:334] "Generic (PLEG): container finished" podID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerID="7bee5e3b29e756f099e44fabfd82181457af418fb8c8c30ff1af9550520ee06b" exitCode=0 Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.709291 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" event={"ID":"caee2c49-8d8b-4df3-86b6-424791c35ff5","Type":"ContainerDied","Data":"7bee5e3b29e756f099e44fabfd82181457af418fb8c8c30ff1af9550520ee06b"} Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.727871 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qt7sf"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.731722 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.760752 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.760853 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djxtr\" (UniqueName: \"kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.760902 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.760961 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.760992 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktxfx\" (UniqueName: \"kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.761073 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.761169 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.761194 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.784743 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc 
kubenswrapper[4664]: I1013 07:03:18.785128 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.814620 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.814973 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.818670 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.824453 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.836879 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djxtr\" (UniqueName: \"kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr\") pod \"placement-db-sync-nhrg9\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.860759 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867003 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prc9r\" (UniqueName: \"kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867106 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktxfx\" (UniqueName: \"kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867223 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867268 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867343 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.867405 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.885067 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.890637 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.900640 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.901086 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktxfx\" (UniqueName: \"kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.902164 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data\") pod \"barbican-db-sync-g6lvp\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.925725 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.929834 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.939141 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nhrg9" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.973389 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974195 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974270 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974345 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974435 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config\") pod \"neutron-db-sync-qt7sf\" 
(UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974512 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974577 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974647 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974721 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974804 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prc9r\" (UniqueName: \"kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.974868 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.975743 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4q8g\" (UniqueName: \"kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.975856 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.976091 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn9kp\" (UniqueName: 
\"kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.983177 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.983312 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.991755 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:18 crc kubenswrapper[4664]: I1013 07:03:18.993092 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.013485 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prc9r\" (UniqueName: \"kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r\") pod \"neutron-db-sync-qt7sf\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.013882 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077577 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077620 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crztk\" (UniqueName: \"kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077652 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077683 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077756 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.077869 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc\") pod \"caee2c49-8d8b-4df3-86b6-424791c35ff5\" (UID: \"caee2c49-8d8b-4df3-86b6-424791c35ff5\") " Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078071 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078100 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078132 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078149 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078173 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078198 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078224 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078273 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4q8g\" (UniqueName: \"kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078305 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn9kp\" (UniqueName: \"kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078331 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078359 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.078716 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.082677 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " 
pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.083240 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.088336 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.089272 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.095621 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.096263 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.102732 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.155060 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4q8g\" (UniqueName: \"kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g\") pod \"dnsmasq-dns-cfbcdc87f-qnbnm\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.170170 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk" (OuterVolumeSpecName: "kube-api-access-crztk") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "kube-api-access-crztk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.171530 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.181133 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crztk\" (UniqueName: \"kubernetes.io/projected/caee2c49-8d8b-4df3-86b6-424791c35ff5-kube-api-access-crztk\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.195113 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn9kp\" (UniqueName: \"kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp\") pod \"horizon-699b597b6f-7d58d\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.234619 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.250464 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.300169 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.332912 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.350122 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-2x2fr"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.367342 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dgpx8"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.384176 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.384229 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.389637 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.402247 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.423326 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config" (OuterVolumeSpecName: "config") pod "caee2c49-8d8b-4df3-86b6-424791c35ff5" (UID: "caee2c49-8d8b-4df3-86b6-424791c35ff5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.485950 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.486203 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.486213 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caee2c49-8d8b-4df3-86b6-424791c35ff5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.641631 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.750746 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.750934 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dcf8755f-v8kgn" event={"ID":"caee2c49-8d8b-4df3-86b6-424791c35ff5","Type":"ContainerDied","Data":"e5aef54ec951912e8a66e05aebc80911713482c728b89e3e90ab3c57ec836ab8"} Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.751579 4664 scope.go:117] "RemoveContainer" containerID="7bee5e3b29e756f099e44fabfd82181457af418fb8c8c30ff1af9550520ee06b" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.769806 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2x2fr" event={"ID":"ad23ea69-3e65-4f4c-afdc-21abded4e19c","Type":"ContainerStarted","Data":"c6a1e5994f311fd061f347f12b8cd26072a103a0dd303041dc9e8562def65ff3"} Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.775010 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" event={"ID":"f059a76d-674a-414a-aa7d-25b9db386d1a","Type":"ContainerStarted","Data":"32f5254b374616436f7895dd148609022dd7ba45d01732492b40f64edf8b1fbf"} Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.787296 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.793957 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dcf8755f-v8kgn"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.795020 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dgpx8" event={"ID":"1feec8e5-3ec8-4f43-9550-daf6d482e6b7","Type":"ContainerStarted","Data":"2b1ae8f01cb859ff0e8a1bde428582e5cc338daa0e0312d2cab0a0a1097c8712"} Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.795051 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dgpx8" event={"ID":"1feec8e5-3ec8-4f43-9550-daf6d482e6b7","Type":"ContainerStarted","Data":"991475d9027edf6846207baf0d892dcd10ade5f7de600e00dbe7d429833c3605"} Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.822655 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.835643 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dgpx8" podStartSLOduration=2.835624921 podStartE2EDuration="2.835624921s" podCreationTimestamp="2025-10-13 07:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:03:19.834293206 +0000 UTC m=+1007.521738418" watchObservedRunningTime="2025-10-13 07:03:19.835624921 +0000 UTC m=+1007.523070113" Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.859680 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:03:19 crc kubenswrapper[4664]: I1013 07:03:19.864003 4664 scope.go:117] "RemoveContainer" containerID="b4c7ddf2116a9f856cb9871fcf1a383f52ea39bb34d56d2431a0d2a37c32ab21" Oct 13 07:03:19 crc kubenswrapper[4664]: W1013 07:03:19.878181 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22180335_bd2f_4201_be9e_25b47d834e1c.slice/crio-4ff5af2b8228a481e1a20bd6c345805eed37730b3abb6d53a12594c6f190d0f8 WatchSource:0}: Error finding container 
4ff5af2b8228a481e1a20bd6c345805eed37730b3abb6d53a12594c6f190d0f8: Status 404 returned error can't find the container with id 4ff5af2b8228a481e1a20bd6c345805eed37730b3abb6d53a12594c6f190d0f8 Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.055075 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g6lvp"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.072928 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nhrg9"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.092677 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jbdfp"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.111597 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qt7sf"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.351555 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.366232 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.871546 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.897818 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.937066 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-757c9cbd7-c6dbp" event={"ID":"22180335-bd2f-4201-be9e-25b47d834e1c","Type":"ContainerStarted","Data":"4ff5af2b8228a481e1a20bd6c345805eed37730b3abb6d53a12594c6f190d0f8"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.944941 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qt7sf" event={"ID":"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7","Type":"ContainerStarted","Data":"819fd8eb6e6306700eacbdbd5c37bde81115db45c2d2b535ce4c8b56e94b4228"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.956157 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-699b597b6f-7d58d" event={"ID":"c1d6c5c8-cced-49ff-8e66-35e961f49d06","Type":"ContainerStarted","Data":"94c7ce1da41ae083aa5c3ae7824ad6c0091fc9e9295b9051819b2cf25d6129d4"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.965515 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nhrg9" event={"ID":"817edc64-6579-4cfd-97ab-705680d79119","Type":"ContainerStarted","Data":"24328cde770d81269c4c1d3318a3f5acd1e823e32a54c1fed4723c4cfeb02ba4"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.983115 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g6lvp" event={"ID":"dac71586-090c-42d0-b9a2-9f53b4937c09","Type":"ContainerStarted","Data":"ab1002a8bcd2a95b2ad69e57b9314dfc6ee1b0e69b6f19d3c11d14b005e18a77"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.991285 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jbdfp" event={"ID":"0b2b826f-7b2a-4de6-9f80-7c854b988a67","Type":"ContainerStarted","Data":"6374d70dab4441c07cffed87f340fc697ee18aebf96291a5b3e94813b9bbea84"} Oct 13 07:03:20 crc kubenswrapper[4664]: I1013 07:03:20.993933 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" 
event={"ID":"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738","Type":"ContainerStarted","Data":"525b95323eeea6911ee96b219f7d78c98b37ccec50fc69896a76dd302bca3750"} Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:20.995817 4664 generic.go:334] "Generic (PLEG): container finished" podID="f059a76d-674a-414a-aa7d-25b9db386d1a" containerID="1d760850d0dda17c1b901a69805c46f4960012899990189012507bc34035fc51" exitCode=0 Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:20.995858 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" event={"ID":"f059a76d-674a-414a-aa7d-25b9db386d1a","Type":"ContainerDied","Data":"1d760850d0dda17c1b901a69805c46f4960012899990189012507bc34035fc51"} Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.031936 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerStarted","Data":"143f1d056ee23413e45733d1d0364d11485b942f83d8739d5a6b0a5d248c22f5"} Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.035070 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:21 crc kubenswrapper[4664]: E1013 07:03:21.035419 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="init" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.035437 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="init" Oct 13 07:03:21 crc kubenswrapper[4664]: E1013 07:03:21.035472 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="dnsmasq-dns" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.035479 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="dnsmasq-dns" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.035644 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" containerName="dnsmasq-dns" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.036517 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.089349 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caee2c49-8d8b-4df3-86b6-424791c35ff5" path="/var/lib/kubelet/pods/caee2c49-8d8b-4df3-86b6-424791c35ff5/volumes" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.096734 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.150446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.150988 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjsgx\" (UniqueName: \"kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.151121 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.151216 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.151323 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254082 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254172 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254205 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjsgx\" (UniqueName: \"kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: 
\"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254245 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254272 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.254732 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.255919 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.256029 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.273959 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.276544 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjsgx\" (UniqueName: \"kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx\") pod \"horizon-5fc54c47d5-5tdpn\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.400652 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.589056 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.659946 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.661010 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.661044 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.661089 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.661135 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt87d\" (UniqueName: \"kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.661235 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb\") pod \"f059a76d-674a-414a-aa7d-25b9db386d1a\" (UID: \"f059a76d-674a-414a-aa7d-25b9db386d1a\") " Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.723102 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d" (OuterVolumeSpecName: "kube-api-access-mt87d") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "kube-api-access-mt87d". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.749189 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config" (OuterVolumeSpecName: "config") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.761353 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.761531 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.764539 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.764566 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.764575 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt87d\" (UniqueName: \"kubernetes.io/projected/f059a76d-674a-414a-aa7d-25b9db386d1a-kube-api-access-mt87d\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.764583 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.767548 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.796331 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f059a76d-674a-414a-aa7d-25b9db386d1a" (UID: "f059a76d-674a-414a-aa7d-25b9db386d1a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.866162 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:21 crc kubenswrapper[4664]: I1013 07:03:21.866196 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f059a76d-674a-414a-aa7d-25b9db386d1a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.008174 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:22 crc kubenswrapper[4664]: W1013 07:03:22.085054 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd11c3c2_9172_4025_a9da_d6b046b80e55.slice/crio-fe62456bebc3c8f1d6e8b29f75a1e5cd00e1ba20e75f8e789d2b4bae8b568c8a WatchSource:0}: Error finding container fe62456bebc3c8f1d6e8b29f75a1e5cd00e1ba20e75f8e789d2b4bae8b568c8a: Status 404 returned error can't find the container with id fe62456bebc3c8f1d6e8b29f75a1e5cd00e1ba20e75f8e789d2b4bae8b568c8a Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.116546 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" event={"ID":"f059a76d-674a-414a-aa7d-25b9db386d1a","Type":"ContainerDied","Data":"32f5254b374616436f7895dd148609022dd7ba45d01732492b40f64edf8b1fbf"} Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.116599 4664 scope.go:117] "RemoveContainer" containerID="1d760850d0dda17c1b901a69805c46f4960012899990189012507bc34035fc51" Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.116719 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84c8b5579c-gl2vc" Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.126237 4664 generic.go:334] "Generic (PLEG): container finished" podID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerID="acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f" exitCode=0 Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.126294 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" event={"ID":"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738","Type":"ContainerDied","Data":"acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f"} Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.135140 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qt7sf" event={"ID":"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7","Type":"ContainerStarted","Data":"d9d76b10d66b97d7dd0eff360b656d3dda15c232bcc7d475885e3a87efa557b6"} Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.212373 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-qt7sf" podStartSLOduration=4.212356549 podStartE2EDuration="4.212356549s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:03:22.188530392 +0000 UTC m=+1009.875975584" watchObservedRunningTime="2025-10-13 07:03:22.212356549 +0000 UTC m=+1009.899801731" Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.254371 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"] Oct 13 07:03:22 crc kubenswrapper[4664]: I1013 07:03:22.287503 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84c8b5579c-gl2vc"] Oct 13 07:03:23 crc kubenswrapper[4664]: I1013 07:03:23.108706 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f059a76d-674a-414a-aa7d-25b9db386d1a" path="/var/lib/kubelet/pods/f059a76d-674a-414a-aa7d-25b9db386d1a/volumes" Oct 13 07:03:23 crc kubenswrapper[4664]: I1013 07:03:23.266036 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc54c47d5-5tdpn" event={"ID":"bd11c3c2-9172-4025-a9da-d6b046b80e55","Type":"ContainerStarted","Data":"fe62456bebc3c8f1d6e8b29f75a1e5cd00e1ba20e75f8e789d2b4bae8b568c8a"} Oct 13 07:03:23 crc kubenswrapper[4664]: I1013 07:03:23.272224 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" event={"ID":"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738","Type":"ContainerStarted","Data":"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8"} Oct 13 07:03:23 crc kubenswrapper[4664]: I1013 07:03:23.272310 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:23 crc kubenswrapper[4664]: I1013 07:03:23.300669 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" podStartSLOduration=5.300647538 podStartE2EDuration="5.300647538s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:03:23.296335843 +0000 UTC m=+1010.983781045" watchObservedRunningTime="2025-10-13 07:03:23.300647538 +0000 UTC m=+1010.988092730" Oct 13 07:03:25 crc kubenswrapper[4664]: I1013 07:03:25.313180 4664 generic.go:334] "Generic 
(PLEG): container finished" podID="1feec8e5-3ec8-4f43-9550-daf6d482e6b7" containerID="2b1ae8f01cb859ff0e8a1bde428582e5cc338daa0e0312d2cab0a0a1097c8712" exitCode=0 Oct 13 07:03:25 crc kubenswrapper[4664]: I1013 07:03:25.313249 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dgpx8" event={"ID":"1feec8e5-3ec8-4f43-9550-daf6d482e6b7","Type":"ContainerDied","Data":"2b1ae8f01cb859ff0e8a1bde428582e5cc338daa0e0312d2cab0a0a1097c8712"} Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.761263 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.870449 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4c9t\" (UniqueName: \"kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.870741 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.870769 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.870837 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.871003 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.871023 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data\") pod \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\" (UID: \"1feec8e5-3ec8-4f43-9550-daf6d482e6b7\") " Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.877139 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts" (OuterVolumeSpecName: "scripts") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.886946 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). 
InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.887621 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t" (OuterVolumeSpecName: "kube-api-access-h4c9t") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). InnerVolumeSpecName "kube-api-access-h4c9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.907929 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.916551 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data" (OuterVolumeSpecName: "config-data") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.954547 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1feec8e5-3ec8-4f43-9550-daf6d482e6b7" (UID: "1feec8e5-3ec8-4f43-9550-daf6d482e6b7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972723 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972752 4664 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972764 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972772 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4c9t\" (UniqueName: \"kubernetes.io/projected/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-kube-api-access-h4c9t\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972782 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:26 crc kubenswrapper[4664]: I1013 07:03:26.972790 4664 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1feec8e5-3ec8-4f43-9550-daf6d482e6b7-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.369992 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dgpx8" event={"ID":"1feec8e5-3ec8-4f43-9550-daf6d482e6b7","Type":"ContainerDied","Data":"991475d9027edf6846207baf0d892dcd10ade5f7de600e00dbe7d429833c3605"} Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.370029 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="991475d9027edf6846207baf0d892dcd10ade5f7de600e00dbe7d429833c3605" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.370048 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dgpx8" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.372969 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.410192 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"] Oct 13 07:03:27 crc kubenswrapper[4664]: E1013 07:03:27.410553 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1feec8e5-3ec8-4f43-9550-daf6d482e6b7" containerName="keystone-bootstrap" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.410570 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1feec8e5-3ec8-4f43-9550-daf6d482e6b7" containerName="keystone-bootstrap" Oct 13 07:03:27 crc kubenswrapper[4664]: E1013 07:03:27.410582 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f059a76d-674a-414a-aa7d-25b9db386d1a" containerName="init" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.410601 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f059a76d-674a-414a-aa7d-25b9db386d1a" containerName="init" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.410778 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1feec8e5-3ec8-4f43-9550-daf6d482e6b7" containerName="keystone-bootstrap" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.410817 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f059a76d-674a-414a-aa7d-25b9db386d1a" containerName="init" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.411804 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.418937 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.440843 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.471984 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dgpx8"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489068 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489111 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489145 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489180 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489212 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489241 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489269 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5vml\" (UniqueName: \"kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.489343 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dgpx8"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.535517 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.547608 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7d78c558d-rjg4v"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.548962 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.562099 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7d78c558d-rjg4v"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.579018 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-tvqs6"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.582509 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.588518 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kssk7" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.588972 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.589125 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.589255 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.590991 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591042 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591090 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591135 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591171 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591205 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.591240 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5vml\" (UniqueName: \"kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.592320 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.594924 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.597078 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.614866 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.617046 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5vml\" (UniqueName: \"kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.617892 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.622139 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key\") pod \"horizon-8487d6c5d4-cgnm9\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.622210 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-tvqs6"] Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692495 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-config-data\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692591 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692618 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-tls-certs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692651 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692701 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8cmg\" (UniqueName: \"kubernetes.io/projected/786f35fd-a7cc-4749-bc5e-47c28ffa4245-kube-api-access-v8cmg\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692731 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692847 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-combined-ca-bundle\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692903 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692934 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-scripts\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.692981 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/786f35fd-a7cc-4749-bc5e-47c28ffa4245-logs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.693001 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-txkzx\" (UniqueName: \"kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.693025 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-secret-key\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.738216 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.793947 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.793989 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-tls-certs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794010 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794066 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8cmg\" (UniqueName: \"kubernetes.io/projected/786f35fd-a7cc-4749-bc5e-47c28ffa4245-kube-api-access-v8cmg\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794088 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794121 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-combined-ca-bundle\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794140 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 
07:03:27.794156 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794181 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-scripts\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794236 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/786f35fd-a7cc-4749-bc5e-47c28ffa4245-logs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794260 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txkzx\" (UniqueName: \"kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794290 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-secret-key\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.794314 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-config-data\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.795056 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-scripts\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.795496 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/786f35fd-a7cc-4749-bc5e-47c28ffa4245-config-data\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.795747 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/786f35fd-a7cc-4749-bc5e-47c28ffa4245-logs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.800745 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-combined-ca-bundle\") pod 
\"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.801406 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.802413 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.803247 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.809998 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-tls-certs\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.810850 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/786f35fd-a7cc-4749-bc5e-47c28ffa4245-horizon-secret-key\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.815219 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txkzx\" (UniqueName: \"kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.816859 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.817389 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8cmg\" (UniqueName: \"kubernetes.io/projected/786f35fd-a7cc-4749-bc5e-47c28ffa4245-kube-api-access-v8cmg\") pod \"horizon-7d78c558d-rjg4v\" (UID: \"786f35fd-a7cc-4749-bc5e-47c28ffa4245\") " pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.818078 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts\") pod \"keystone-bootstrap-tvqs6\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.870335 4664 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:03:27 crc kubenswrapper[4664]: I1013 07:03:27.963216 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:03:28 crc kubenswrapper[4664]: I1013 07:03:28.812065 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:03:28 crc kubenswrapper[4664]: I1013 07:03:28.812124 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:03:29 crc kubenswrapper[4664]: I1013 07:03:29.060030 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1feec8e5-3ec8-4f43-9550-daf6d482e6b7" path="/var/lib/kubelet/pods/1feec8e5-3ec8-4f43-9550-daf6d482e6b7/volumes" Oct 13 07:03:29 crc kubenswrapper[4664]: I1013 07:03:29.237844 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:03:29 crc kubenswrapper[4664]: I1013 07:03:29.313123 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"] Oct 13 07:03:29 crc kubenswrapper[4664]: I1013 07:03:29.313345 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" containerID="cri-o://4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305" gracePeriod=10 Oct 13 07:03:29 crc kubenswrapper[4664]: E1013 07:03:29.514584 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42f42e84_f556_4409_8f8f_3f7f529681e8.slice/crio-conmon-4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42f42e84_f556_4409_8f8f_3f7f529681e8.slice/crio-4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305.scope\": RecentStats: unable to find data in memory cache]" Oct 13 07:03:30 crc kubenswrapper[4664]: I1013 07:03:30.417699 4664 generic.go:334] "Generic (PLEG): container finished" podID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerID="4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305" exitCode=0 Oct 13 07:03:30 crc kubenswrapper[4664]: I1013 07:03:30.417965 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" event={"ID":"42f42e84-f556-4409-8f8f-3f7f529681e8","Type":"ContainerDied","Data":"4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305"} Oct 13 07:03:32 crc kubenswrapper[4664]: I1013 07:03:32.636539 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Oct 13 07:03:37 crc 
kubenswrapper[4664]: I1013 07:03:37.636408 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Oct 13 07:03:39 crc kubenswrapper[4664]: I1013 07:03:39.499966 4664 generic.go:334] "Generic (PLEG): container finished" podID="57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" containerID="d9d76b10d66b97d7dd0eff360b656d3dda15c232bcc7d475885e3a87efa557b6" exitCode=0 Oct 13 07:03:39 crc kubenswrapper[4664]: I1013 07:03:39.500219 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qt7sf" event={"ID":"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7","Type":"ContainerDied","Data":"d9d76b10d66b97d7dd0eff360b656d3dda15c232bcc7d475885e3a87efa557b6"} Oct 13 07:03:42 crc kubenswrapper[4664]: I1013 07:03:42.637095 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Oct 13 07:03:42 crc kubenswrapper[4664]: I1013 07:03:42.637657 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:03:44 crc kubenswrapper[4664]: E1013 07:03:44.754180 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:44 crc kubenswrapper[4664]: E1013 07:03:44.754559 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:44 crc kubenswrapper[4664]: E1013 07:03:44.754770 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nch645h577h697h695h9dh59h677h7h78h649h554h68h8bh67bh579h64dhb5h55fh5dh7ch666h587h64bh66bh67ch54fh85h67bh74h86h686q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kjsgx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5fc54c47d5-5tdpn_openstack(bd11c3c2-9172-4025-a9da-d6b046b80e55): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:44 crc kubenswrapper[4664]: E1013 07:03:44.759710 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849\\\"\"]" pod="openstack/horizon-5fc54c47d5-5tdpn" podUID="bd11c3c2-9172-4025-a9da-d6b046b80e55" Oct 13 07:03:45 crc kubenswrapper[4664]: E1013 07:03:45.314013 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:45 crc kubenswrapper[4664]: E1013 07:03:45.314064 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:45 crc kubenswrapper[4664]: E1013 07:03:45.314171 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ktxfx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-g6lvp_openstack(dac71586-090c-42d0-b9a2-9f53b4937c09): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:45 crc kubenswrapper[4664]: E1013 07:03:45.315340 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-g6lvp" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" Oct 13 07:03:45 crc kubenswrapper[4664]: E1013 07:03:45.564406 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/barbican-db-sync-g6lvp" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.637308 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.773141 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-placement-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.773587 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-placement-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.773759 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:placement-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-placement-api:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-djxtr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-nhrg9_openstack(817edc64-6579-4cfd-97ab-705680d79119): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.777775 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-nhrg9" podUID="817edc64-6579-4cfd-97ab-705680d79119" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.799977 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.800044 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.800220 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c tail 
-n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f9h5fdh564h5b8h557hddh6ch67fh68ch664h549h5b7h699h5dch594h64bh68dh674h58bhfdhdfh88h66dh5f8h66ch666h599h97h57bh65bhd6h696q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-26mvb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-757c9cbd7-c6dbp_openstack(22180335-bd2f-4201-be9e-25b47d834e1c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.802967 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849\\\"\"]" pod="openstack/horizon-757c9cbd7-c6dbp" podUID="22180335-bd2f-4201-be9e-25b47d834e1c" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.816966 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.817039 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.817212 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nbhcfh599h5cfhbch66bh69h69h677h5fchb6h89h5dfhc8h59ch65ch565h57dh65chd9h564h59bh557h555h5bbh686h5f9hch656h76h96h85q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pn9kp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-699b597b6f-7d58d_openstack(c1d6c5c8-cced-49ff-8e66-35e961f49d06): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:47 crc kubenswrapper[4664]: E1013 07:03:47.821371 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:92672cd85fd36317d65faa0525acf849\\\"\"]" pod="openstack/horizon-699b597b6f-7d58d" podUID="c1d6c5c8-cced-49ff-8e66-35e961f49d06" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.875576 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.973564 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data\") pod \"bd11c3c2-9172-4025-a9da-d6b046b80e55\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.973615 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs\") pod \"bd11c3c2-9172-4025-a9da-d6b046b80e55\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.973682 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts\") pod \"bd11c3c2-9172-4025-a9da-d6b046b80e55\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.973861 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjsgx\" (UniqueName: \"kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx\") pod \"bd11c3c2-9172-4025-a9da-d6b046b80e55\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.973896 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key\") pod \"bd11c3c2-9172-4025-a9da-d6b046b80e55\" (UID: \"bd11c3c2-9172-4025-a9da-d6b046b80e55\") " Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.975365 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts" (OuterVolumeSpecName: "scripts") pod "bd11c3c2-9172-4025-a9da-d6b046b80e55" (UID: "bd11c3c2-9172-4025-a9da-d6b046b80e55"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.975636 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data" (OuterVolumeSpecName: "config-data") pod "bd11c3c2-9172-4025-a9da-d6b046b80e55" (UID: "bd11c3c2-9172-4025-a9da-d6b046b80e55"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.976284 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs" (OuterVolumeSpecName: "logs") pod "bd11c3c2-9172-4025-a9da-d6b046b80e55" (UID: "bd11c3c2-9172-4025-a9da-d6b046b80e55"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.995005 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "bd11c3c2-9172-4025-a9da-d6b046b80e55" (UID: "bd11c3c2-9172-4025-a9da-d6b046b80e55"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:03:47 crc kubenswrapper[4664]: I1013 07:03:47.995025 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx" (OuterVolumeSpecName: "kube-api-access-kjsgx") pod "bd11c3c2-9172-4025-a9da-d6b046b80e55" (UID: "bd11c3c2-9172-4025-a9da-d6b046b80e55"). InnerVolumeSpecName "kube-api-access-kjsgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.076509 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjsgx\" (UniqueName: \"kubernetes.io/projected/bd11c3c2-9172-4025-a9da-d6b046b80e55-kube-api-access-kjsgx\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.076547 4664 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/bd11c3c2-9172-4025-a9da-d6b046b80e55-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.076559 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.076571 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd11c3c2-9172-4025-a9da-d6b046b80e55-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.076582 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd11c3c2-9172-4025-a9da-d6b046b80e55-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:03:48 crc kubenswrapper[4664]: E1013 07:03:48.240052 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:48 crc kubenswrapper[4664]: E1013 07:03:48.240115 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:48 crc kubenswrapper[4664]: E1013 07:03:48.240533 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n587h577hb8h676hffhc8h657h5ddh55hd7h56fh549h594h598hddh54fh5f7hb5hc9h555hcfh5f4h7ch7hc9h64dh65ch574h645h56fh65chf7q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wqjvj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(da4413cf-00ec-4092-a86a-be0874b30c2c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.587113 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc54c47d5-5tdpn" event={"ID":"bd11c3c2-9172-4025-a9da-d6b046b80e55","Type":"ContainerDied","Data":"fe62456bebc3c8f1d6e8b29f75a1e5cd00e1ba20e75f8e789d2b4bae8b568c8a"} Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.587208 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fc54c47d5-5tdpn" Oct 13 07:03:48 crc kubenswrapper[4664]: E1013 07:03:48.595668 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-placement-api:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/placement-db-sync-nhrg9" podUID="817edc64-6579-4cfd-97ab-705680d79119" Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.698923 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:48 crc kubenswrapper[4664]: I1013 07:03:48.706176 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fc54c47d5-5tdpn"] Oct 13 07:03:49 crc kubenswrapper[4664]: I1013 07:03:49.056327 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd11c3c2-9172-4025-a9da-d6b046b80e55" path="/var/lib/kubelet/pods/bd11c3c2-9172-4025-a9da-d6b046b80e55/volumes" Oct 13 07:03:55 crc kubenswrapper[4664]: E1013 07:03:55.262744 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-glance-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:55 crc kubenswrapper[4664]: E1013 07:03:55.263252 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-glance-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:03:55 crc kubenswrapper[4664]: E1013 07:03:55.263383 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-glance-api:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pkg74,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-7nsnc_openstack(0ca51385-fe74-4d5b-a542-f33734fb8e46): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:03:55 crc kubenswrapper[4664]: E1013 07:03:55.264780 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-7nsnc" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" Oct 13 07:03:55 crc kubenswrapper[4664]: E1013 07:03:55.650755 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-glance-api:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/glance-db-sync-7nsnc" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" Oct 13 07:03:57 crc kubenswrapper[4664]: I1013 07:03:57.637355 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: i/o timeout" Oct 13 07:03:58 crc kubenswrapper[4664]: I1013 07:03:58.812704 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:03:58 crc kubenswrapper[4664]: I1013 07:03:58.812784 4664 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:03:58 crc kubenswrapper[4664]: I1013 07:03:58.812879 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:03:58 crc kubenswrapper[4664]: I1013 07:03:58.813567 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:03:58 crc kubenswrapper[4664]: I1013 07:03:58.813638 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1" gracePeriod=600 Oct 13 07:03:59 crc kubenswrapper[4664]: I1013 07:03:59.681879 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1" exitCode=0 Oct 13 07:03:59 crc kubenswrapper[4664]: I1013 07:03:59.681939 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1"} Oct 13 07:03:59 crc kubenswrapper[4664]: I1013 07:03:59.682142 4664 scope.go:117] "RemoveContainer" containerID="493f5a79780ab2c8a05a8d3a586ef54fbc0335841ed7372ce3fb36a1348a27b5" Oct 13 07:04:02 crc kubenswrapper[4664]: I1013 07:04:02.638372 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: i/o timeout" Oct 13 07:04:05 crc kubenswrapper[4664]: E1013 07:04:05.307761 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:92672cd85fd36317d65faa0525acf849" Oct 13 07:04:05 crc kubenswrapper[4664]: E1013 07:04:05.308147 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:92672cd85fd36317d65faa0525acf849" Oct 13 07:04:05 crc kubenswrapper[4664]: E1013 07:04:05.308246 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rh476,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-2x2fr_openstack(ad23ea69-3e65-4f4c-afdc-21abded4e19c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:04:05 crc kubenswrapper[4664]: E1013 07:04:05.310579 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-2x2fr" podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.449221 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.471375 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.481457 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.511262 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.615664 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key\") pod \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.615931 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb\") pod \"42f42e84-f556-4409-8f8f-3f7f529681e8\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616022 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs\") pod \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616113 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key\") pod \"22180335-bd2f-4201-be9e-25b47d834e1c\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616191 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle\") pod \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616263 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config\") pod \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616334 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prc9r\" (UniqueName: \"kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r\") pod \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\" (UID: \"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616428 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcxv2\" (UniqueName: \"kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2\") pod \"42f42e84-f556-4409-8f8f-3f7f529681e8\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616494 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26mvb\" (UniqueName: \"kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb\") pod \"22180335-bd2f-4201-be9e-25b47d834e1c\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616581 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts\") pod 
\"c1d6c5c8-cced-49ff-8e66-35e961f49d06\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616659 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data\") pod \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616727 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config\") pod \"42f42e84-f556-4409-8f8f-3f7f529681e8\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616818 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs\") pod \"22180335-bd2f-4201-be9e-25b47d834e1c\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616898 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts\") pod \"22180335-bd2f-4201-be9e-25b47d834e1c\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.616981 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pn9kp\" (UniqueName: \"kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp\") pod \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\" (UID: \"c1d6c5c8-cced-49ff-8e66-35e961f49d06\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.617073 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc\") pod \"42f42e84-f556-4409-8f8f-3f7f529681e8\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.617162 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb\") pod \"42f42e84-f556-4409-8f8f-3f7f529681e8\" (UID: \"42f42e84-f556-4409-8f8f-3f7f529681e8\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.617229 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data\") pod \"22180335-bd2f-4201-be9e-25b47d834e1c\" (UID: \"22180335-bd2f-4201-be9e-25b47d834e1c\") " Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.617584 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts" (OuterVolumeSpecName: "scripts") pod "c1d6c5c8-cced-49ff-8e66-35e961f49d06" (UID: "c1d6c5c8-cced-49ff-8e66-35e961f49d06"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.618153 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data" (OuterVolumeSpecName: "config-data") pod "c1d6c5c8-cced-49ff-8e66-35e961f49d06" (UID: "c1d6c5c8-cced-49ff-8e66-35e961f49d06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.618483 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs" (OuterVolumeSpecName: "logs") pod "c1d6c5c8-cced-49ff-8e66-35e961f49d06" (UID: "c1d6c5c8-cced-49ff-8e66-35e961f49d06"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.620365 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data" (OuterVolumeSpecName: "config-data") pod "22180335-bd2f-4201-be9e-25b47d834e1c" (UID: "22180335-bd2f-4201-be9e-25b47d834e1c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.622044 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs" (OuterVolumeSpecName: "logs") pod "22180335-bd2f-4201-be9e-25b47d834e1c" (UID: "22180335-bd2f-4201-be9e-25b47d834e1c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.622605 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts" (OuterVolumeSpecName: "scripts") pod "22180335-bd2f-4201-be9e-25b47d834e1c" (UID: "22180335-bd2f-4201-be9e-25b47d834e1c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.624957 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c1d6c5c8-cced-49ff-8e66-35e961f49d06" (UID: "c1d6c5c8-cced-49ff-8e66-35e961f49d06"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.625091 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2" (OuterVolumeSpecName: "kube-api-access-qcxv2") pod "42f42e84-f556-4409-8f8f-3f7f529681e8" (UID: "42f42e84-f556-4409-8f8f-3f7f529681e8"). InnerVolumeSpecName "kube-api-access-qcxv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.627340 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "22180335-bd2f-4201-be9e-25b47d834e1c" (UID: "22180335-bd2f-4201-be9e-25b47d834e1c"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.627470 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb" (OuterVolumeSpecName: "kube-api-access-26mvb") pod "22180335-bd2f-4201-be9e-25b47d834e1c" (UID: "22180335-bd2f-4201-be9e-25b47d834e1c"). InnerVolumeSpecName "kube-api-access-26mvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.634250 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp" (OuterVolumeSpecName: "kube-api-access-pn9kp") pod "c1d6c5c8-cced-49ff-8e66-35e961f49d06" (UID: "c1d6c5c8-cced-49ff-8e66-35e961f49d06"). InnerVolumeSpecName "kube-api-access-pn9kp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.656367 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r" (OuterVolumeSpecName: "kube-api-access-prc9r") pod "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" (UID: "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7"). InnerVolumeSpecName "kube-api-access-prc9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.661423 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config" (OuterVolumeSpecName: "config") pod "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" (UID: "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.677911 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config" (OuterVolumeSpecName: "config") pod "42f42e84-f556-4409-8f8f-3f7f529681e8" (UID: "42f42e84-f556-4409-8f8f-3f7f529681e8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.681973 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "42f42e84-f556-4409-8f8f-3f7f529681e8" (UID: "42f42e84-f556-4409-8f8f-3f7f529681e8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.683959 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" (UID: "57a1074e-9a30-4cb7-8278-60d4e8eaf9c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.699524 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "42f42e84-f556-4409-8f8f-3f7f529681e8" (UID: "42f42e84-f556-4409-8f8f-3f7f529681e8"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.708758 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "42f42e84-f556-4409-8f8f-3f7f529681e8" (UID: "42f42e84-f556-4409-8f8f-3f7f529681e8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.719768 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pn9kp\" (UniqueName: \"kubernetes.io/projected/c1d6c5c8-cced-49ff-8e66-35e961f49d06-kube-api-access-pn9kp\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.719882 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.719963 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720030 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720099 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720166 4664 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c1d6c5c8-cced-49ff-8e66-35e961f49d06-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720228 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1d6c5c8-cced-49ff-8e66-35e961f49d06-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720293 4664 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/22180335-bd2f-4201-be9e-25b47d834e1c-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720356 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720420 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720482 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prc9r\" (UniqueName: \"kubernetes.io/projected/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7-kube-api-access-prc9r\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720550 4664 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-qcxv2\" (UniqueName: \"kubernetes.io/projected/42f42e84-f556-4409-8f8f-3f7f529681e8-kube-api-access-qcxv2\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720611 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26mvb\" (UniqueName: \"kubernetes.io/projected/22180335-bd2f-4201-be9e-25b47d834e1c-kube-api-access-26mvb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720693 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720771 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d6c5c8-cced-49ff-8e66-35e961f49d06-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720849 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42f42e84-f556-4409-8f8f-3f7f529681e8-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.720946 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22180335-bd2f-4201-be9e-25b47d834e1c-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.721027 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/22180335-bd2f-4201-be9e-25b47d834e1c-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.736968 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.737670 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" event={"ID":"42f42e84-f556-4409-8f8f-3f7f529681e8","Type":"ContainerDied","Data":"82066fe6775790d745ac629e99589478cac13110aa8dec143ad9b5f1a8b5811f"} Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.738734 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-757c9cbd7-c6dbp" event={"ID":"22180335-bd2f-4201-be9e-25b47d834e1c","Type":"ContainerDied","Data":"4ff5af2b8228a481e1a20bd6c345805eed37730b3abb6d53a12594c6f190d0f8"} Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.738876 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-757c9cbd7-c6dbp" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.744602 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qt7sf" event={"ID":"57a1074e-9a30-4cb7-8278-60d4e8eaf9c7","Type":"ContainerDied","Data":"819fd8eb6e6306700eacbdbd5c37bde81115db45c2d2b535ce4c8b56e94b4228"} Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.744644 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="819fd8eb6e6306700eacbdbd5c37bde81115db45c2d2b535ce4c8b56e94b4228" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.744699 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qt7sf" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.751947 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-699b597b6f-7d58d" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.751988 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-699b597b6f-7d58d" event={"ID":"c1d6c5c8-cced-49ff-8e66-35e961f49d06","Type":"ContainerDied","Data":"94c7ce1da41ae083aa5c3ae7824ad6c0091fc9e9295b9051819b2cf25d6129d4"} Oct 13 07:04:05 crc kubenswrapper[4664]: E1013 07:04:05.753750 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/heat-db-sync-2x2fr" podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.818789 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.842360 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-757c9cbd7-c6dbp"] Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.854348 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"] Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.861951 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d5c4f869-tbdgk"] Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.875154 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:04:05 crc kubenswrapper[4664]: I1013 07:04:05.882164 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-699b597b6f-7d58d"] Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717368 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:06 crc kubenswrapper[4664]: E1013 07:04:06.717705 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717717 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" Oct 13 07:04:06 crc kubenswrapper[4664]: E1013 07:04:06.717742 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="init" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717748 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="init" Oct 13 07:04:06 crc kubenswrapper[4664]: E1013 07:04:06.717759 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" containerName="neutron-db-sync" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717765 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" containerName="neutron-db-sync" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717924 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.717938 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" containerName="neutron-db-sync" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.718727 4664 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.759534 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.842983 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.843064 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.843098 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.843182 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.843224 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.843287 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvlkd\" (UniqueName: \"kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.858894 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.861042 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.867335 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-t6k2k" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.867559 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.876255 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.876809 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.879003 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945580 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945627 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945676 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945705 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945739 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945756 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945774 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: 
\"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945806 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945851 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvlkd\" (UniqueName: \"kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945897 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc7qz\" (UniqueName: \"kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.945919 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.946563 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.946609 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.947181 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.947318 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.947787 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " 
pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:06 crc kubenswrapper[4664]: I1013 07:04:06.971277 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvlkd\" (UniqueName: \"kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd\") pod \"dnsmasq-dns-64c58ff495-zb76j\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.035850 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.046934 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc7qz\" (UniqueName: \"kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.047022 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.047063 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.047079 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.047099 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.050629 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.053549 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.056656 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") 
" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.057841 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.061079 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22180335-bd2f-4201-be9e-25b47d834e1c" path="/var/lib/kubelet/pods/22180335-bd2f-4201-be9e-25b47d834e1c/volumes" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.061662 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" path="/var/lib/kubelet/pods/42f42e84-f556-4409-8f8f-3f7f529681e8/volumes" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.062836 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1d6c5c8-cced-49ff-8e66-35e961f49d06" path="/var/lib/kubelet/pods/c1d6c5c8-cced-49ff-8e66-35e961f49d06/volumes" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.068358 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc7qz\" (UniqueName: \"kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz\") pod \"neutron-b7789cdc8-2t2mz\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.194939 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:07 crc kubenswrapper[4664]: E1013 07:04:07.281725 4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:04:07 crc kubenswrapper[4664]: E1013 07:04:07.281779 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:92672cd85fd36317d65faa0525acf849" Oct 13 07:04:07 crc kubenswrapper[4664]: E1013 07:04:07.281909 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:92672cd85fd36317d65faa0525acf849,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fzmkq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-jbdfp_openstack(0b2b826f-7b2a-4de6-9f80-7c854b988a67): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 13 07:04:07 crc kubenswrapper[4664]: E1013 07:04:07.283073 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-jbdfp" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.640251 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-d5c4f869-tbdgk" podUID="42f42e84-f556-4409-8f8f-3f7f529681e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: i/o timeout" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.686309 4664 scope.go:117] "RemoveContainer" containerID="4a1606c49d573aaac49165605e04dec2b558f744092d222b91cc3a66f5ba5305" Oct 13 07:04:07 crc kubenswrapper[4664]: E1013 07:04:07.824180 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/cinder-db-sync-jbdfp" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" Oct 13 07:04:07 crc kubenswrapper[4664]: I1013 07:04:07.869231 4664 scope.go:117] "RemoveContainer" containerID="c3119ae1ffa43e0e2da6b7d90790aafc396973ce93245b2639c114067a4939a1" Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.139493 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-tvqs6"] Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.239432 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"] Oct 13 07:04:08 crc kubenswrapper[4664]: W1013 07:04:08.270691 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podefaa5cce_79f5_4cdb_abf0_06b59765b776.slice/crio-62ce774e7b934179a542df7e968bd9ff21c78c5a8155f1f3c68bf9bd5d2955ee WatchSource:0}: Error finding container 62ce774e7b934179a542df7e968bd9ff21c78c5a8155f1f3c68bf9bd5d2955ee: Status 404 returned error can't find the container with id 62ce774e7b934179a542df7e968bd9ff21c78c5a8155f1f3c68bf9bd5d2955ee Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.484771 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.508947 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7d78c558d-rjg4v"] Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.518465 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:08 crc kubenswrapper[4664]: W1013 07:04:08.536296 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45e17674_0bf2_491b_8c3b_73b83dcfc3d8.slice/crio-bfa2ce522bf760e83cdef7eef566923767c3ab361cde5ab5954dca7c7b444406 WatchSource:0}: Error finding container bfa2ce522bf760e83cdef7eef566923767c3ab361cde5ab5954dca7c7b444406: Status 404 returned error can't find the container with id bfa2ce522bf760e83cdef7eef566923767c3ab361cde5ab5954dca7c7b444406 Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.844296 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerStarted","Data":"0bad5cc6c81cb37888da1bd0712f04802297fd0a68a2dfb49c78826e80a749b2"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.848890 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nhrg9" event={"ID":"817edc64-6579-4cfd-97ab-705680d79119","Type":"ContainerStarted","Data":"15ec08e83e05d18d28ea41c99c390166025a444fb13a4b6aaa2fb20877fb68ae"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.850653 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g6lvp" event={"ID":"dac71586-090c-42d0-b9a2-9f53b4937c09","Type":"ContainerStarted","Data":"d1353fb72661a2534866aab934397eb5447bac08f8e74ce3079d39e72631348f"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.854404 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tvqs6" event={"ID":"3145c64c-4c70-4445-9ad2-bf492cb74e64","Type":"ContainerStarted","Data":"f03513597d8fbc31e4596fc7b220501859710f528c0d4d3caefa0436ed455c97"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.854431 4664 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tvqs6" event={"ID":"3145c64c-4c70-4445-9ad2-bf492cb74e64","Type":"ContainerStarted","Data":"a23c768a096df28c9bdf418172b1fb6da6519cfc43c1b8758ec3ed0a5bd31990"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.869834 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-nhrg9" podStartSLOduration=3.342440827 podStartE2EDuration="50.869818693s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="2025-10-13 07:03:20.165627291 +0000 UTC m=+1007.853072483" lastFinishedPulling="2025-10-13 07:04:07.693005157 +0000 UTC m=+1055.380450349" observedRunningTime="2025-10-13 07:04:08.861687295 +0000 UTC m=+1056.549132487" watchObservedRunningTime="2025-10-13 07:04:08.869818693 +0000 UTC m=+1056.557263885" Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.870550 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"09bf235a4c4846f03042799ec8c066ae12385b33aca8f153a59ac0fa0182741a"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.892043 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"62ce774e7b934179a542df7e968bd9ff21c78c5a8155f1f3c68bf9bd5d2955ee"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.901933 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.903677 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-g6lvp" podStartSLOduration=3.362556374 podStartE2EDuration="50.903665287s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="2025-10-13 07:03:20.166894405 +0000 UTC m=+1007.854339597" lastFinishedPulling="2025-10-13 07:04:07.708003318 +0000 UTC m=+1055.395448510" observedRunningTime="2025-10-13 07:04:08.884852154 +0000 UTC m=+1056.572297356" watchObservedRunningTime="2025-10-13 07:04:08.903665287 +0000 UTC m=+1056.591110499" Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.915168 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-tvqs6" podStartSLOduration=41.915151034 podStartE2EDuration="41.915151034s" podCreationTimestamp="2025-10-13 07:03:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:08.907430287 +0000 UTC m=+1056.594875479" watchObservedRunningTime="2025-10-13 07:04:08.915151034 +0000 UTC m=+1056.602596226" Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.915413 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerStarted","Data":"175799aa812484a69ce3c2e0b45de95e91600a27146ea8c8bf1da8a7cc14cf8a"} Oct 13 07:04:08 crc kubenswrapper[4664]: I1013 07:04:08.917038 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" 
event={"ID":"45e17674-0bf2-491b-8c3b-73b83dcfc3d8","Type":"ContainerStarted","Data":"bfa2ce522bf760e83cdef7eef566923767c3ab361cde5ab5954dca7c7b444406"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.927695 4664 generic.go:334] "Generic (PLEG): container finished" podID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerID="fe80ee217c5802187b19df280b32bda094f0ff62ddd6725f72ce8b3724716482" exitCode=0 Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.927758 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" event={"ID":"45e17674-0bf2-491b-8c3b-73b83dcfc3d8","Type":"ContainerDied","Data":"fe80ee217c5802187b19df280b32bda094f0ff62ddd6725f72ce8b3724716482"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.930751 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.930784 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"b3140382539f3da699fccec40fbcb27ac5fe0d631e911ac7ccdf3942dc8804fe"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.948449 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7nsnc" event={"ID":"0ca51385-fe74-4d5b-a542-f33734fb8e46","Type":"ContainerStarted","Data":"e3db5d83c1b6440ebe159e708aa3bd5086266878c5e5e5c6a2f61fa012f97ab6"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.967638 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.967676 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"c995b53e3e760f0e991f2fa6d22b4a9435f276e7c32d9fcd380d13a0f190e9bf"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.976743 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerStarted","Data":"a02de48f31d1eb5dff6f95bfd76ac165fbeefd66d8e68acae06e902c9a28947f"} Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.976978 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:09 crc kubenswrapper[4664]: I1013 07:04:09.977046 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerStarted","Data":"810b3204424935fa735cbf37a668d73b3a982f147986947fa806549e6c51fe64"} Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.008846 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-8487d6c5d4-cgnm9" podStartSLOduration=42.704698568 podStartE2EDuration="43.008786356s" podCreationTimestamp="2025-10-13 07:03:27 +0000 UTC" firstStartedPulling="2025-10-13 07:04:08.279566555 +0000 UTC m=+1055.967011747" lastFinishedPulling="2025-10-13 07:04:08.583654343 +0000 UTC m=+1056.271099535" observedRunningTime="2025-10-13 
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.041251 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-7nsnc" podStartSLOduration=5.269991595 podStartE2EDuration="1m0.041235553s" podCreationTimestamp="2025-10-13 07:03:10 +0000 UTC" firstStartedPulling="2025-10-13 07:03:13.107089472 +0000 UTC m=+1000.794534684" lastFinishedPulling="2025-10-13 07:04:07.87833345 +0000 UTC m=+1055.565778642" observedRunningTime="2025-10-13 07:04:10.03961942 +0000 UTC m=+1057.727064612" watchObservedRunningTime="2025-10-13 07:04:10.041235553 +0000 UTC m=+1057.728680745"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.095466 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-b7789cdc8-2t2mz" podStartSLOduration=4.095429932 podStartE2EDuration="4.095429932s" podCreationTimestamp="2025-10-13 07:04:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:10.084556441 +0000 UTC m=+1057.772001633" watchObservedRunningTime="2025-10-13 07:04:10.095429932 +0000 UTC m=+1057.782875124"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.121230 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"]
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.122831 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.129606 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.129815 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.148567 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"]
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.153620 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7d78c558d-rjg4v" podStartSLOduration=42.882248444 podStartE2EDuration="43.153575906s" podCreationTimestamp="2025-10-13 07:03:27 +0000 UTC" firstStartedPulling="2025-10-13 07:04:08.565613271 +0000 UTC m=+1056.253058463" lastFinishedPulling="2025-10-13 07:04:08.836940733 +0000 UTC m=+1056.524385925" observedRunningTime="2025-10-13 07:04:10.139158601 +0000 UTC m=+1057.826603783" watchObservedRunningTime="2025-10-13 07:04:10.153575906 +0000 UTC m=+1057.841021108"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223696 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223737 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m4q5\" (UniqueName: \"kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223759 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223832 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223857 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.223873 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.224069 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.325092 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326036 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326404 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326549 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326652 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326727 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m4q5\" (UniqueName: \"kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.326850 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.339070 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.341506 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.342309 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.344345 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.352733 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.361413 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.384091 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m4q5\" (UniqueName: \"kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5\") pod \"neutron-757f4d5bc7-72d99\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:10 crc kubenswrapper[4664]: I1013 07:04:10.450938 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:11 crc kubenswrapper[4664]: I1013 07:04:10.999947 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" event={"ID":"45e17674-0bf2-491b-8c3b-73b83dcfc3d8","Type":"ContainerStarted","Data":"bd3cde87b9b883059a0502643132ec9f20c4070857606a45dcf7b700fd6a6e0c"}
Oct 13 07:04:11 crc kubenswrapper[4664]: I1013 07:04:11.021167 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" podStartSLOduration=5.021151876 podStartE2EDuration="5.021151876s" podCreationTimestamp="2025-10-13 07:04:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:11.017488477 +0000 UTC m=+1058.704933679" watchObservedRunningTime="2025-10-13 07:04:11.021151876 +0000 UTC m=+1058.708597068"
Oct 13 07:04:11 crc kubenswrapper[4664]: I1013 07:04:11.258365 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"]
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.013883 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerStarted","Data":"a5751a966d89b818450155acb99025fc39018ee60ac69d1269424a004aa94e0b"}
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.014105 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64c58ff495-zb76j"
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.014115 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerStarted","Data":"e37381fa848a248f07e9bd62bba40f44b9ab1cb6563151d4cb26bcefacd306e7"}
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.014123 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerStarted","Data":"2c95824f127f3681be1b0f7e8183116a96cbee4a8a315ef4d72ed04d840e45df"}
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.014150 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-757f4d5bc7-72d99"
Oct 13 07:04:12 crc kubenswrapper[4664]: I1013 07:04:12.033519 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-757f4d5bc7-72d99" podStartSLOduration=2.033503735 podStartE2EDuration="2.033503735s" podCreationTimestamp="2025-10-13 07:04:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:12.030058663 +0000 UTC m=+1059.717503875" watchObservedRunningTime="2025-10-13 07:04:12.033503735 +0000 UTC m=+1059.720948927"
Oct 13 07:04:15 crc kubenswrapper[4664]: I1013 07:04:15.042342 4664 generic.go:334] "Generic (PLEG): container finished" podID="3145c64c-4c70-4445-9ad2-bf492cb74e64" containerID="f03513597d8fbc31e4596fc7b220501859710f528c0d4d3caefa0436ed455c97" exitCode=0
containerID="f03513597d8fbc31e4596fc7b220501859710f528c0d4d3caefa0436ed455c97" exitCode=0 Oct 13 07:04:15 crc kubenswrapper[4664]: I1013 07:04:15.042390 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tvqs6" event={"ID":"3145c64c-4c70-4445-9ad2-bf492cb74e64","Type":"ContainerDied","Data":"f03513597d8fbc31e4596fc7b220501859710f528c0d4d3caefa0436ed455c97"} Oct 13 07:04:15 crc kubenswrapper[4664]: I1013 07:04:15.049053 4664 generic.go:334] "Generic (PLEG): container finished" podID="817edc64-6579-4cfd-97ab-705680d79119" containerID="15ec08e83e05d18d28ea41c99c390166025a444fb13a4b6aaa2fb20877fb68ae" exitCode=0 Oct 13 07:04:15 crc kubenswrapper[4664]: I1013 07:04:15.060144 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nhrg9" event={"ID":"817edc64-6579-4cfd-97ab-705680d79119","Type":"ContainerDied","Data":"15ec08e83e05d18d28ea41c99c390166025a444fb13a4b6aaa2fb20877fb68ae"} Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.894011 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nhrg9" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.906197 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964756 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle\") pod \"817edc64-6579-4cfd-97ab-705680d79119\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964824 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data\") pod \"817edc64-6579-4cfd-97ab-705680d79119\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964858 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964899 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964936 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs\") pod \"817edc64-6579-4cfd-97ab-705680d79119\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.964954 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.965021 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-djxtr\" (UniqueName: \"kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr\") pod \"817edc64-6579-4cfd-97ab-705680d79119\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.965051 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txkzx\" (UniqueName: \"kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.965079 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.965143 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts\") pod \"3145c64c-4c70-4445-9ad2-bf492cb74e64\" (UID: \"3145c64c-4c70-4445-9ad2-bf492cb74e64\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.965171 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts\") pod \"817edc64-6579-4cfd-97ab-705680d79119\" (UID: \"817edc64-6579-4cfd-97ab-705680d79119\") " Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.966119 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs" (OuterVolumeSpecName: "logs") pod "817edc64-6579-4cfd-97ab-705680d79119" (UID: "817edc64-6579-4cfd-97ab-705680d79119"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.971898 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.982971 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts" (OuterVolumeSpecName: "scripts") pod "817edc64-6579-4cfd-97ab-705680d79119" (UID: "817edc64-6579-4cfd-97ab-705680d79119"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.986165 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.991201 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx" (OuterVolumeSpecName: "kube-api-access-txkzx") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "kube-api-access-txkzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:16 crc kubenswrapper[4664]: I1013 07:04:16.992741 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts" (OuterVolumeSpecName: "scripts") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.008310 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr" (OuterVolumeSpecName: "kube-api-access-djxtr") pod "817edc64-6579-4cfd-97ab-705680d79119" (UID: "817edc64-6579-4cfd-97ab-705680d79119"). InnerVolumeSpecName "kube-api-access-djxtr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.012135 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "817edc64-6579-4cfd-97ab-705680d79119" (UID: "817edc64-6579-4cfd-97ab-705680d79119"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.012925 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data" (OuterVolumeSpecName: "config-data") pod "817edc64-6579-4cfd-97ab-705680d79119" (UID: "817edc64-6579-4cfd-97ab-705680d79119"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.026699 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data" (OuterVolumeSpecName: "config-data") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.026850 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3145c64c-4c70-4445-9ad2-bf492cb74e64" (UID: "3145c64c-4c70-4445-9ad2-bf492cb74e64"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.037939 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.066960 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.066992 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067001 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067011 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817edc64-6579-4cfd-97ab-705680d79119-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067020 4664 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067029 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067038 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/817edc64-6579-4cfd-97ab-705680d79119-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067046 4664 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067054 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djxtr\" (UniqueName: \"kubernetes.io/projected/817edc64-6579-4cfd-97ab-705680d79119-kube-api-access-djxtr\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067062 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txkzx\" (UniqueName: \"kubernetes.io/projected/3145c64c-4c70-4445-9ad2-bf492cb74e64-kube-api-access-txkzx\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.067071 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3145c64c-4c70-4445-9ad2-bf492cb74e64-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.104138 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.104379 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" 
containerName="dnsmasq-dns" containerID="cri-o://0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8" gracePeriod=10 Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.108985 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nhrg9" event={"ID":"817edc64-6579-4cfd-97ab-705680d79119","Type":"ContainerDied","Data":"24328cde770d81269c4c1d3318a3f5acd1e823e32a54c1fed4723c4cfeb02ba4"} Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.109008 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24328cde770d81269c4c1d3318a3f5acd1e823e32a54c1fed4723c4cfeb02ba4" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.109054 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nhrg9" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.123036 4664 generic.go:334] "Generic (PLEG): container finished" podID="dac71586-090c-42d0-b9a2-9f53b4937c09" containerID="d1353fb72661a2534866aab934397eb5447bac08f8e74ce3079d39e72631348f" exitCode=0 Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.123095 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g6lvp" event={"ID":"dac71586-090c-42d0-b9a2-9f53b4937c09","Type":"ContainerDied","Data":"d1353fb72661a2534866aab934397eb5447bac08f8e74ce3079d39e72631348f"} Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.131533 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tvqs6" event={"ID":"3145c64c-4c70-4445-9ad2-bf492cb74e64","Type":"ContainerDied","Data":"a23c768a096df28c9bdf418172b1fb6da6519cfc43c1b8758ec3ed0a5bd31990"} Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.131587 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a23c768a096df28c9bdf418172b1fb6da6519cfc43c1b8758ec3ed0a5bd31990" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.131642 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-tvqs6" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.212944 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6559fb6d89-x6txc"] Oct 13 07:04:17 crc kubenswrapper[4664]: E1013 07:04:17.213506 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3145c64c-4c70-4445-9ad2-bf492cb74e64" containerName="keystone-bootstrap" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.213522 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3145c64c-4c70-4445-9ad2-bf492cb74e64" containerName="keystone-bootstrap" Oct 13 07:04:17 crc kubenswrapper[4664]: E1013 07:04:17.213538 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817edc64-6579-4cfd-97ab-705680d79119" containerName="placement-db-sync" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.213544 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="817edc64-6579-4cfd-97ab-705680d79119" containerName="placement-db-sync" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.213703 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3145c64c-4c70-4445-9ad2-bf492cb74e64" containerName="keystone-bootstrap" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.213713 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="817edc64-6579-4cfd-97ab-705680d79119" containerName="placement-db-sync" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.214252 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.216094 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.216402 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kssk7" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.216547 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.217487 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.218574 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.218736 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.232480 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6559fb6d89-x6txc"] Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.270787 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-internal-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271039 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-combined-ca-bundle\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " 
pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271137 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-config-data\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271233 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-fernet-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271314 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-credential-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271382 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-public-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271451 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-scripts\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.271552 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ttj8\" (UniqueName: \"kubernetes.io/projected/69ae4f17-d740-411e-8cb2-45afda327f7e-kube-api-access-7ttj8\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.280043 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5b885874bd-tzm67"] Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.287969 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.293125 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.293265 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.293400 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.294400 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.294632 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hv5d5" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.302053 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5b885874bd-tzm67"] Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373722 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-internal-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373761 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-combined-ca-bundle\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373823 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c8fv\" (UniqueName: \"kubernetes.io/projected/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-kube-api-access-9c8fv\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373855 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-config-data\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373870 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-logs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373897 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-scripts\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373916 4664 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-fernet-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373935 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-public-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373949 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-combined-ca-bundle\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373975 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-credential-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.373994 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-config-data\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.374012 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-public-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.374032 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-scripts\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.374071 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-internal-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.374091 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ttj8\" (UniqueName: \"kubernetes.io/projected/69ae4f17-d740-411e-8cb2-45afda327f7e-kube-api-access-7ttj8\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.379612 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-public-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.379889 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-config-data\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.380853 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-credential-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.381368 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-fernet-keys\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.383453 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-combined-ca-bundle\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.383667 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-scripts\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.383924 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae4f17-d740-411e-8cb2-45afda327f7e-internal-tls-certs\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.388962 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ttj8\" (UniqueName: \"kubernetes.io/projected/69ae4f17-d740-411e-8cb2-45afda327f7e-kube-api-access-7ttj8\") pod \"keystone-6559fb6d89-x6txc\" (UID: \"69ae4f17-d740-411e-8cb2-45afda327f7e\") " pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475275 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-internal-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475369 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c8fv\" (UniqueName: \"kubernetes.io/projected/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-kube-api-access-9c8fv\") pod \"placement-5b885874bd-tzm67\" (UID: 
\"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475393 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-logs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475420 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-scripts\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475441 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-public-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475458 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-combined-ca-bundle\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.475482 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-config-data\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.477037 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-logs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.481205 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-public-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.484755 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-internal-tls-certs\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.484970 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-scripts\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.493148 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-combined-ca-bundle\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.497194 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-config-data\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.497408 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c8fv\" (UniqueName: \"kubernetes.io/projected/0ba8020c-0e48-4f3b-a4bc-646f201bfdef-kube-api-access-9c8fv\") pod \"placement-5b885874bd-tzm67\" (UID: \"0ba8020c-0e48-4f3b-a4bc-646f201bfdef\") " pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.556527 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.592018 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.616483 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.683298 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.683618 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.683658 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4q8g\" (UniqueName: \"kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.683697 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.684122 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.684200 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0\") pod \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\" (UID: \"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738\") " Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.703618 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g" (OuterVolumeSpecName: "kube-api-access-t4q8g") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "kube-api-access-t4q8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.740503 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.742814 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.794309 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4q8g\" (UniqueName: \"kubernetes.io/projected/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-kube-api-access-t4q8g\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.798608 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config" (OuterVolumeSpecName: "config") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.872125 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.873497 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.874294 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.893358 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.894436 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.895479 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.895498 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.895507 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.895517 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:17 crc kubenswrapper[4664]: I1013 07:04:17.907763 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" (UID: "ff35e9a6-bd42-47f6-88e2-c53f6b3e9738"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.006132 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.142571 4664 generic.go:334] "Generic (PLEG): container finished" podID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerID="0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8" exitCode=0 Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.142632 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.142680 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" event={"ID":"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738","Type":"ContainerDied","Data":"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8"} Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.142708 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cfbcdc87f-qnbnm" event={"ID":"ff35e9a6-bd42-47f6-88e2-c53f6b3e9738","Type":"ContainerDied","Data":"525b95323eeea6911ee96b219f7d78c98b37ccec50fc69896a76dd302bca3750"} Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.142724 4664 scope.go:117] "RemoveContainer" containerID="0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.162203 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerStarted","Data":"51601bdca4f7d778209c81f850c87649d1cb6bfc7959dddcc21d5b46b7c5c604"} Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.196739 4664 scope.go:117] "RemoveContainer" containerID="acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.202221 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.276906 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cfbcdc87f-qnbnm"] Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.296239 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5b885874bd-tzm67"] Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.349652 4664 scope.go:117] "RemoveContainer" containerID="0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8" Oct 13 07:04:18 crc kubenswrapper[4664]: E1013 07:04:18.370645 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8\": container with ID starting with 0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8 not found: ID does not exist" containerID="0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.370681 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8"} err="failed to get container status \"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8\": rpc error: code = NotFound desc = could not find container \"0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8\": container with ID starting with 0dd9646ee02a70536d06e8deadec08fb37afb57663e8f4409a3594a1117e72c8 not found: ID does not exist" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.370707 4664 scope.go:117] "RemoveContainer" containerID="acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f" Oct 13 07:04:18 crc kubenswrapper[4664]: E1013 07:04:18.386267 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f\": container with ID starting with 
acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f not found: ID does not exist" containerID="acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.386318 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f"} err="failed to get container status \"acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f\": rpc error: code = NotFound desc = could not find container \"acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f\": container with ID starting with acdeea9c733d2f0d8e3e77f40f112d7889637f024bc21b4c4c086b741be4767f not found: ID does not exist" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.387875 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6559fb6d89-x6txc"] Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.586955 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.628862 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data\") pod \"dac71586-090c-42d0-b9a2-9f53b4937c09\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.629024 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle\") pod \"dac71586-090c-42d0-b9a2-9f53b4937c09\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.629130 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktxfx\" (UniqueName: \"kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx\") pod \"dac71586-090c-42d0-b9a2-9f53b4937c09\" (UID: \"dac71586-090c-42d0-b9a2-9f53b4937c09\") " Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.635063 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "dac71586-090c-42d0-b9a2-9f53b4937c09" (UID: "dac71586-090c-42d0-b9a2-9f53b4937c09"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.635289 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx" (OuterVolumeSpecName: "kube-api-access-ktxfx") pod "dac71586-090c-42d0-b9a2-9f53b4937c09" (UID: "dac71586-090c-42d0-b9a2-9f53b4937c09"). InnerVolumeSpecName "kube-api-access-ktxfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.670082 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dac71586-090c-42d0-b9a2-9f53b4937c09" (UID: "dac71586-090c-42d0-b9a2-9f53b4937c09"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.732108 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktxfx\" (UniqueName: \"kubernetes.io/projected/dac71586-090c-42d0-b9a2-9f53b4937c09-kube-api-access-ktxfx\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.732147 4664 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:18 crc kubenswrapper[4664]: I1013 07:04:18.732162 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac71586-090c-42d0-b9a2-9f53b4937c09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.067888 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" path="/var/lib/kubelet/pods/ff35e9a6-bd42-47f6-88e2-c53f6b3e9738/volumes" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.191693 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b885874bd-tzm67" event={"ID":"0ba8020c-0e48-4f3b-a4bc-646f201bfdef","Type":"ContainerStarted","Data":"541e5cd64eac4ad95f568949dec7fc17a20a425e0a2575b13fd21e47c7c98eda"} Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.191748 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b885874bd-tzm67" event={"ID":"0ba8020c-0e48-4f3b-a4bc-646f201bfdef","Type":"ContainerStarted","Data":"a1a957f6aade383e3e5a2c8b42c94c052624357d0420c97350e4bbb0d63db63c"} Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.213087 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g6lvp" event={"ID":"dac71586-090c-42d0-b9a2-9f53b4937c09","Type":"ContainerDied","Data":"ab1002a8bcd2a95b2ad69e57b9314dfc6ee1b0e69b6f19d3c11d14b005e18a77"} Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.213127 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab1002a8bcd2a95b2ad69e57b9314dfc6ee1b0e69b6f19d3c11d14b005e18a77" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.213195 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-g6lvp" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.236754 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6559fb6d89-x6txc" event={"ID":"69ae4f17-d740-411e-8cb2-45afda327f7e","Type":"ContainerStarted","Data":"5d2e41ae628922c14f667f36d386a620d2a3cc2faf8f8c1230d4fffe4b1e9241"} Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.236873 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6559fb6d89-x6txc" event={"ID":"69ae4f17-d740-411e-8cb2-45afda327f7e","Type":"ContainerStarted","Data":"081feac5e89ada397436d5d32e4f9f4e4f126e16d00ce55cb2f31bca6748c46d"} Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.236913 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.262813 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6559fb6d89-x6txc" podStartSLOduration=2.2627793880000002 podStartE2EDuration="2.262779388s" podCreationTimestamp="2025-10-13 07:04:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:19.258477734 +0000 UTC m=+1066.945922936" watchObservedRunningTime="2025-10-13 07:04:19.262779388 +0000 UTC m=+1066.950224580" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.415891 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5b55d75d57-5ft48"] Oct 13 07:04:19 crc kubenswrapper[4664]: E1013 07:04:19.416308 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" containerName="barbican-db-sync" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.416319 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" containerName="barbican-db-sync" Oct 13 07:04:19 crc kubenswrapper[4664]: E1013 07:04:19.416343 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerName="init" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.416348 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerName="init" Oct 13 07:04:19 crc kubenswrapper[4664]: E1013 07:04:19.416366 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerName="dnsmasq-dns" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.416372 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerName="dnsmasq-dns" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.416539 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff35e9a6-bd42-47f6-88e2-c53f6b3e9738" containerName="dnsmasq-dns" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.416553 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" containerName="barbican-db-sync" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.418038 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.425671 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.425915 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2p8n2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.426032 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.452839 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.452896 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8nfz\" (UniqueName: \"kubernetes.io/projected/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-kube-api-access-v8nfz\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.452940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data-custom\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.452961 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-combined-ca-bundle\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.453001 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-logs\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.471744 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b55d75d57-5ft48"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.509647 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-75bf6d8446-kj4kf"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.511287 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.514235 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.526575 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75bf6d8446-kj4kf"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560012 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-combined-ca-bundle\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560085 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560129 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hwk6\" (UniqueName: \"kubernetes.io/projected/75f2bbf2-f367-4513-bb39-e403007183a8-kube-api-access-8hwk6\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560153 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8nfz\" (UniqueName: \"kubernetes.io/projected/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-kube-api-access-v8nfz\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560191 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/75f2bbf2-f367-4513-bb39-e403007183a8-logs\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560217 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data-custom\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560238 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data-custom\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560260 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-combined-ca-bundle\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560289 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560317 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-logs\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.560750 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-logs\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.581140 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data-custom\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.583524 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-combined-ca-bundle\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.625454 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-config-data\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.627878 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8nfz\" (UniqueName: \"kubernetes.io/projected/48a7c25d-db6e-46dd-8b73-c6269b23cb5e-kube-api-access-v8nfz\") pod \"barbican-worker-5b55d75d57-5ft48\" (UID: \"48a7c25d-db6e-46dd-8b73-c6269b23cb5e\") " pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.656180 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.657960 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661667 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661750 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661783 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661840 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-combined-ca-bundle\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661915 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.661955 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hwk6\" (UniqueName: \"kubernetes.io/projected/75f2bbf2-f367-4513-bb39-e403007183a8-kube-api-access-8hwk6\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.662001 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfc9r\" (UniqueName: \"kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.662045 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.662073 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/75f2bbf2-f367-4513-bb39-e403007183a8-logs\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.662101 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data-custom\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.662172 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.663332 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/75f2bbf2-f367-4513-bb39-e403007183a8-logs\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.665174 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.671596 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-combined-ca-bundle\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.672261 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.686733 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75f2bbf2-f367-4513-bb39-e403007183a8-config-data-custom\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.709619 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.717871 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.723336 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hwk6\" (UniqueName: \"kubernetes.io/projected/75f2bbf2-f367-4513-bb39-e403007183a8-kube-api-access-8hwk6\") pod \"barbican-keystone-listener-75bf6d8446-kj4kf\" (UID: \"75f2bbf2-f367-4513-bb39-e403007183a8\") " pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.734191 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.761234 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775606 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775669 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfc9r\" (UniqueName: \"kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775707 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775735 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775782 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775854 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775907 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " 
pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775935 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775952 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775981 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.775999 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2zwx\" (UniqueName: \"kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.776839 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.777578 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.777656 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.778157 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.778491 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " 
pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.803334 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfc9r\" (UniqueName: \"kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r\") pod \"dnsmasq-dns-65f7b65d4c-fmz64\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.869217 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5b55d75d57-5ft48" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.878878 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.878968 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.879061 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.879100 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.879125 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2zwx\" (UniqueName: \"kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.880249 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.898254 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.903342 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.921422 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.937679 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2zwx\" (UniqueName: \"kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx\") pod \"barbican-api-6d67b46cc4-4zwj2\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:19 crc kubenswrapper[4664]: I1013 07:04:19.957364 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.016003 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.019966 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.345056 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2x2fr" event={"ID":"ad23ea69-3e65-4f4c-afdc-21abded4e19c","Type":"ContainerStarted","Data":"cbbbe3b2a274195ab47e30f3b223cc94f88a8d640fa6fef4401543959fba725e"} Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.373691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b885874bd-tzm67" event={"ID":"0ba8020c-0e48-4f3b-a4bc-646f201bfdef","Type":"ContainerStarted","Data":"4a557f38631a0def69426c2422db76c4d85e0fc682d23ceac866be5a16c0f9c8"} Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.373735 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.373765 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.397891 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-2x2fr" podStartSLOduration=2.386808395 podStartE2EDuration="1m2.397871869s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="2025-10-13 07:03:19.369260576 +0000 UTC m=+1007.056705758" lastFinishedPulling="2025-10-13 07:04:19.38032404 +0000 UTC m=+1067.067769232" observedRunningTime="2025-10-13 07:04:20.374158475 +0000 UTC m=+1068.061603677" watchObservedRunningTime="2025-10-13 07:04:20.397871869 +0000 UTC m=+1068.085317061" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.415956 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5b885874bd-tzm67" podStartSLOduration=3.415922582 podStartE2EDuration="3.415922582s" podCreationTimestamp="2025-10-13 07:04:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-10-13 07:04:20.411186464 +0000 UTC m=+1068.098631666" watchObservedRunningTime="2025-10-13 07:04:20.415922582 +0000 UTC m=+1068.103367774" Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.579260 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75bf6d8446-kj4kf"] Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.763019 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b55d75d57-5ft48"] Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.865964 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:20 crc kubenswrapper[4664]: I1013 07:04:20.904572 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:21 crc kubenswrapper[4664]: I1013 07:04:21.380337 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" event={"ID":"75f2bbf2-f367-4513-bb39-e403007183a8","Type":"ContainerStarted","Data":"6b2507418ad65345d4877e458889716548cc739ec45656e0ff4abad36ca85ecd"} Oct 13 07:04:21 crc kubenswrapper[4664]: I1013 07:04:21.381392 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" event={"ID":"4fa11d17-6316-4c38-88e0-a7a5c293b9d1","Type":"ContainerStarted","Data":"c516d7d4fe916b3dfddb274773ccc8c3c7b7159039c6d91102b5b10736389a0b"} Oct 13 07:04:21 crc kubenswrapper[4664]: I1013 07:04:21.384282 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerStarted","Data":"0c7161c7f8c9806e2440762a434e62070bafe65217dfc8a23ee5f7089e818e84"} Oct 13 07:04:21 crc kubenswrapper[4664]: I1013 07:04:21.385609 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b55d75d57-5ft48" event={"ID":"48a7c25d-db6e-46dd-8b73-c6269b23cb5e","Type":"ContainerStarted","Data":"e078bb066c5921ec909e6f7099fe0e8bce47a2b329048ae7fc5c7fec3b1bf50f"} Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.398900 4664 generic.go:334] "Generic (PLEG): container finished" podID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerID="c031c8becf6a1b66bbc7eb416253522dd40a6c91b4f52dca0f7a104741deabf5" exitCode=0 Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.399325 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" event={"ID":"4fa11d17-6316-4c38-88e0-a7a5c293b9d1","Type":"ContainerDied","Data":"c031c8becf6a1b66bbc7eb416253522dd40a6c91b4f52dca0f7a104741deabf5"} Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.407187 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerStarted","Data":"b5776d7a67e6f0092c9361b5c40ec63d9ad10dd6ea73c6047b94807e0dd74f39"} Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.854589 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-79f9447d46-mnb2r"] Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.856146 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.865171 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.865310 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.917110 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-79f9447d46-mnb2r"] Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.969672 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-combined-ca-bundle\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.969839 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-internal-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.969972 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/576e5bbc-aadf-4530-9a7f-bcf8401874a6-logs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.970028 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.970067 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data-custom\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.970099 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-public-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:22 crc kubenswrapper[4664]: I1013 07:04:22.970121 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8qjt\" (UniqueName: \"kubernetes.io/projected/576e5bbc-aadf-4530-9a7f-bcf8401874a6-kube-api-access-b8qjt\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072625 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/576e5bbc-aadf-4530-9a7f-bcf8401874a6-logs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072684 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072710 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data-custom\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072736 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-public-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072757 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8qjt\" (UniqueName: \"kubernetes.io/projected/576e5bbc-aadf-4530-9a7f-bcf8401874a6-kube-api-access-b8qjt\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072839 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-combined-ca-bundle\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.072889 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-internal-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.087563 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-public-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.087998 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/576e5bbc-aadf-4530-9a7f-bcf8401874a6-logs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.122738 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data-custom\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.124674 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-combined-ca-bundle\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.147028 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-config-data\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.155637 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/576e5bbc-aadf-4530-9a7f-bcf8401874a6-internal-tls-certs\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.181559 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8qjt\" (UniqueName: \"kubernetes.io/projected/576e5bbc-aadf-4530-9a7f-bcf8401874a6-kube-api-access-b8qjt\") pod \"barbican-api-79f9447d46-mnb2r\" (UID: \"576e5bbc-aadf-4530-9a7f-bcf8401874a6\") " pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.183805 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.480993 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerStarted","Data":"0c7c10d42ac82800460d5083107bad03d64f54e32a7163a2e1af4842853d3b60"} Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.481344 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.481360 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.521309 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" event={"ID":"4fa11d17-6316-4c38-88e0-a7a5c293b9d1","Type":"ContainerStarted","Data":"27beb3d557fd1cee8bf4feee30917e63015a5e2132eab765bd78b2d83572b6cf"} Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.522209 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.585230 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podStartSLOduration=4.585205724 podStartE2EDuration="4.585205724s" podCreationTimestamp="2025-10-13 07:04:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:23.522485638 +0000 UTC m=+1071.209930850" watchObservedRunningTime="2025-10-13 07:04:23.585205724 +0000 UTC m=+1071.272650916" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.597417 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" podStartSLOduration=4.597377419 podStartE2EDuration="4.597377419s" podCreationTimestamp="2025-10-13 07:04:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:23.547604349 +0000 UTC m=+1071.235049551" watchObservedRunningTime="2025-10-13 07:04:23.597377419 +0000 UTC m=+1071.284822611" Oct 13 07:04:23 crc kubenswrapper[4664]: I1013 07:04:23.683274 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-79f9447d46-mnb2r"] Oct 13 07:04:23 crc kubenswrapper[4664]: W1013 07:04:23.707992 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod576e5bbc_aadf_4530_9a7f_bcf8401874a6.slice/crio-f5dcf214094beadeaae2f5cf3e8198cea728e5add1483fe9fe2dd700a7d3d14f WatchSource:0}: Error finding container f5dcf214094beadeaae2f5cf3e8198cea728e5add1483fe9fe2dd700a7d3d14f: Status 404 returned error can't find the container with id f5dcf214094beadeaae2f5cf3e8198cea728e5add1483fe9fe2dd700a7d3d14f Oct 13 07:04:24 crc kubenswrapper[4664]: I1013 07:04:24.533421 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jbdfp" event={"ID":"0b2b826f-7b2a-4de6-9f80-7c854b988a67","Type":"ContainerStarted","Data":"f742c5133839c6b2697a028bedaa5b2c6a999aa584c199191c899bcc0e844579"} Oct 13 07:04:24 crc kubenswrapper[4664]: I1013 07:04:24.540119 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79f9447d46-mnb2r" 
event={"ID":"576e5bbc-aadf-4530-9a7f-bcf8401874a6","Type":"ContainerStarted","Data":"4dbbfa1940dff63908f03e5b37561d57af248627aeaf38644289ee9edd3960d8"} Oct 13 07:04:24 crc kubenswrapper[4664]: I1013 07:04:24.540154 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79f9447d46-mnb2r" event={"ID":"576e5bbc-aadf-4530-9a7f-bcf8401874a6","Type":"ContainerStarted","Data":"f5dcf214094beadeaae2f5cf3e8198cea728e5add1483fe9fe2dd700a7d3d14f"} Oct 13 07:04:24 crc kubenswrapper[4664]: I1013 07:04:24.591777 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-jbdfp" podStartSLOduration=4.3032758 podStartE2EDuration="1m6.591756309s" podCreationTimestamp="2025-10-13 07:03:18 +0000 UTC" firstStartedPulling="2025-10-13 07:03:20.193175718 +0000 UTC m=+1007.880620910" lastFinishedPulling="2025-10-13 07:04:22.481656227 +0000 UTC m=+1070.169101419" observedRunningTime="2025-10-13 07:04:24.58205376 +0000 UTC m=+1072.269498962" watchObservedRunningTime="2025-10-13 07:04:24.591756309 +0000 UTC m=+1072.279201501" Oct 13 07:04:27 crc kubenswrapper[4664]: I1013 07:04:27.741044 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:04:27 crc kubenswrapper[4664]: I1013 07:04:27.873116 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:04:28 crc kubenswrapper[4664]: I1013 07:04:28.576508 4664 generic.go:334] "Generic (PLEG): container finished" podID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" containerID="cbbbe3b2a274195ab47e30f3b223cc94f88a8d640fa6fef4401543959fba725e" exitCode=0 Oct 13 07:04:28 crc kubenswrapper[4664]: I1013 07:04:28.576571 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2x2fr" event={"ID":"ad23ea69-3e65-4f4c-afdc-21abded4e19c","Type":"ContainerDied","Data":"cbbbe3b2a274195ab47e30f3b223cc94f88a8d640fa6fef4401543959fba725e"} Oct 13 07:04:29 crc kubenswrapper[4664]: I1013 07:04:29.586265 4664 generic.go:334] "Generic (PLEG): container finished" podID="0ca51385-fe74-4d5b-a542-f33734fb8e46" containerID="e3db5d83c1b6440ebe159e708aa3bd5086266878c5e5e5c6a2f61fa012f97ab6" exitCode=0 Oct 13 07:04:29 crc kubenswrapper[4664]: I1013 07:04:29.586351 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7nsnc" event={"ID":"0ca51385-fe74-4d5b-a542-f33734fb8e46","Type":"ContainerDied","Data":"e3db5d83c1b6440ebe159e708aa3bd5086266878c5e5e5c6a2f61fa012f97ab6"} Oct 13 07:04:30 crc kubenswrapper[4664]: I1013 07:04:30.018040 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:30 crc kubenswrapper[4664]: I1013 07:04:30.111853 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:30 crc kubenswrapper[4664]: I1013 07:04:30.112070 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" 
podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="dnsmasq-dns" containerID="cri-o://bd3cde87b9b883059a0502643132ec9f20c4070857606a45dcf7b700fd6a6e0c" gracePeriod=10 Oct 13 07:04:30 crc kubenswrapper[4664]: I1013 07:04:30.603273 4664 generic.go:334] "Generic (PLEG): container finished" podID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerID="bd3cde87b9b883059a0502643132ec9f20c4070857606a45dcf7b700fd6a6e0c" exitCode=0 Oct 13 07:04:30 crc kubenswrapper[4664]: I1013 07:04:30.603461 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" event={"ID":"45e17674-0bf2-491b-8c3b-73b83dcfc3d8","Type":"ContainerDied","Data":"bd3cde87b9b883059a0502643132ec9f20c4070857606a45dcf7b700fd6a6e0c"} Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.612511 4664 generic.go:334] "Generic (PLEG): container finished" podID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" containerID="f742c5133839c6b2697a028bedaa5b2c6a999aa584c199191c899bcc0e844579" exitCode=0 Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.612568 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jbdfp" event={"ID":"0b2b826f-7b2a-4de6-9f80-7c854b988a67","Type":"ContainerDied","Data":"f742c5133839c6b2697a028bedaa5b2c6a999aa584c199191c899bcc0e844579"} Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.898436 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2x2fr" Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.904524 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7nsnc" Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945006 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle\") pod \"0ca51385-fe74-4d5b-a542-f33734fb8e46\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945092 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data\") pod \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945126 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rh476\" (UniqueName: \"kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476\") pod \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945159 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle\") pod \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\" (UID: \"ad23ea69-3e65-4f4c-afdc-21abded4e19c\") " Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945210 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkg74\" (UniqueName: \"kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74\") pod \"0ca51385-fe74-4d5b-a542-f33734fb8e46\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") " Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945230 4664 
Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.945244 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data\") pod \"0ca51385-fe74-4d5b-a542-f33734fb8e46\" (UID: \"0ca51385-fe74-4d5b-a542-f33734fb8e46\") "
Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.960040 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476" (OuterVolumeSpecName: "kube-api-access-rh476") pod "ad23ea69-3e65-4f4c-afdc-21abded4e19c" (UID: "ad23ea69-3e65-4f4c-afdc-21abded4e19c"). InnerVolumeSpecName "kube-api-access-rh476". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.965992 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0ca51385-fe74-4d5b-a542-f33734fb8e46" (UID: "0ca51385-fe74-4d5b-a542-f33734fb8e46"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:04:31 crc kubenswrapper[4664]: I1013 07:04:31.970860 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74" (OuterVolumeSpecName: "kube-api-access-pkg74") pod "0ca51385-fe74-4d5b-a542-f33734fb8e46" (UID: "0ca51385-fe74-4d5b-a542-f33734fb8e46"). InnerVolumeSpecName "kube-api-access-pkg74". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.017733 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ca51385-fe74-4d5b-a542-f33734fb8e46" (UID: "0ca51385-fe74-4d5b-a542-f33734fb8e46"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.040911 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.154:5353: connect: connection refused" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.052738 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.052764 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rh476\" (UniqueName: \"kubernetes.io/projected/ad23ea69-3e65-4f4c-afdc-21abded4e19c-kube-api-access-rh476\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.052776 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkg74\" (UniqueName: \"kubernetes.io/projected/0ca51385-fe74-4d5b-a542-f33734fb8e46-kube-api-access-pkg74\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.052785 4664 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.290386 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad23ea69-3e65-4f4c-afdc-21abded4e19c" (UID: "ad23ea69-3e65-4f4c-afdc-21abded4e19c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.325274 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-config-data" (OuterVolumeSpecName: "config-data") pod "0ca51385-fe74-4d5b-a542-f33734fb8e46" (UID: "0ca51385-fe74-4d5b-a542-f33734fb8e46"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.330893 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data" (OuterVolumeSpecName: "config-data") pod "ad23ea69-3e65-4f4c-afdc-21abded4e19c" (UID: "ad23ea69-3e65-4f4c-afdc-21abded4e19c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.367609 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.368891 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.368912 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ca51385-fe74-4d5b-a542-f33734fb8e46-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.368922 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad23ea69-3e65-4f4c-afdc-21abded4e19c-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.384255 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.446014 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573175 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc\") pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573255 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvlkd\" (UniqueName: \"kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd\") pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573325 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0\") pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573534 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb\") pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573581 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb\") pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.573623 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config\") 
pod \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\" (UID: \"45e17674-0bf2-491b-8c3b-73b83dcfc3d8\") " Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.609031 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd" (OuterVolumeSpecName: "kube-api-access-qvlkd") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "kube-api-access-qvlkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:32 crc kubenswrapper[4664]: E1013 07:04:32.625406 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.652024 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" event={"ID":"75f2bbf2-f367-4513-bb39-e403007183a8","Type":"ContainerStarted","Data":"c683ce35ea41f9cbac4181901cb798ffe254a30f4fff3d8fee21fd0465e62ff8"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.660996 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" event={"ID":"45e17674-0bf2-491b-8c3b-73b83dcfc3d8","Type":"ContainerDied","Data":"bfa2ce522bf760e83cdef7eef566923767c3ab361cde5ab5954dca7c7b444406"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.661047 4664 scope.go:117] "RemoveContainer" containerID="bd3cde87b9b883059a0502643132ec9f20c4070857606a45dcf7b700fd6a6e0c" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.661168 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64c58ff495-zb76j" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.675952 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvlkd\" (UniqueName: \"kubernetes.io/projected/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-kube-api-access-qvlkd\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.685155 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerStarted","Data":"3e98e310e9273a988cb52efff2ff3e2fde492c3c614dc7f1aae5eacb75813db4"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.685364 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="ceilometer-notification-agent" containerID="cri-o://0bad5cc6c81cb37888da1bd0712f04802297fd0a68a2dfb49c78826e80a749b2" gracePeriod=30 Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.685655 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.685909 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="proxy-httpd" containerID="cri-o://3e98e310e9273a988cb52efff2ff3e2fde492c3c614dc7f1aae5eacb75813db4" gracePeriod=30 Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.685948 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="sg-core" containerID="cri-o://51601bdca4f7d778209c81f850c87649d1cb6bfc7959dddcc21d5b46b7c5c604" gracePeriod=30 Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.692298 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.701763 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79f9447d46-mnb2r" event={"ID":"576e5bbc-aadf-4530-9a7f-bcf8401874a6","Type":"ContainerStarted","Data":"aea941205cc2dfbdef6394b2ecba3f47226e96870592b1c9586bdba37cdbb0a9"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.702330 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.702375 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.713366 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7nsnc" event={"ID":"0ca51385-fe74-4d5b-a542-f33734fb8e46","Type":"ContainerDied","Data":"bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.713412 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc1e16028c08de853d684ae6ddb79408b58f970a469ccab912d684ffb797aee8" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.713464 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-7nsnc" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.715872 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2x2fr" event={"ID":"ad23ea69-3e65-4f4c-afdc-21abded4e19c","Type":"ContainerDied","Data":"c6a1e5994f311fd061f347f12b8cd26072a103a0dd303041dc9e8562def65ff3"} Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.715921 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6a1e5994f311fd061f347f12b8cd26072a103a0dd303041dc9e8562def65ff3" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.716373 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2x2fr" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.834574 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-79f9447d46-mnb2r" podStartSLOduration=10.834553023 podStartE2EDuration="10.834553023s" podCreationTimestamp="2025-10-13 07:04:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:32.784331341 +0000 UTC m=+1080.471776533" watchObservedRunningTime="2025-10-13 07:04:32.834553023 +0000 UTC m=+1080.521998215" Oct 13 07:04:32 crc kubenswrapper[4664]: I1013 07:04:32.884270 4664 scope.go:117] "RemoveContainer" containerID="fe80ee217c5802187b19df280b32bda094f0ff62ddd6725f72ce8b3724716482" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.166238 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config" (OuterVolumeSpecName: "config") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.173586 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.178313 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.205237 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.205262 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.205271 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.242869 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.307878 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.352959 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "45e17674-0bf2-491b-8c3b-73b83dcfc3d8" (UID: "45e17674-0bf2-491b-8c3b-73b83dcfc3d8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.413439 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45e17674-0bf2-491b-8c3b-73b83dcfc3d8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.442289 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.554150 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"] Oct 13 07:04:33 crc kubenswrapper[4664]: E1013 07:04:33.555077 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="init" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555104 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="init" Oct 13 07:04:33 crc kubenswrapper[4664]: E1013 07:04:33.555123 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" containerName="heat-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555131 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" containerName="heat-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: E1013 07:04:33.555174 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" containerName="glance-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555189 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" containerName="glance-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: E1013 07:04:33.555202 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" containerName="cinder-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555210 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" containerName="cinder-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: E1013 07:04:33.555230 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="dnsmasq-dns" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555238 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="dnsmasq-dns" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555623 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" containerName="dnsmasq-dns" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555653 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" containerName="heat-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555692 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" containerName="cinder-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.555709 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" containerName="glance-db-sync" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.562028 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621177 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621245 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzmkq\" (UniqueName: \"kubernetes.io/projected/0b2b826f-7b2a-4de6-9f80-7c854b988a67-kube-api-access-fzmkq\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621353 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621417 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.621475 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts\") pod \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\" (UID: \"0b2b826f-7b2a-4de6-9f80-7c854b988a67\") " Oct 13 07:04:33 crc kubenswrapper[4664]: I1013 07:04:33.624340 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.640331 4664 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0b2b826f-7b2a-4de6-9f80-7c854b988a67-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.645223 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b2b826f-7b2a-4de6-9f80-7c854b988a67-kube-api-access-fzmkq" (OuterVolumeSpecName: "kube-api-access-fzmkq") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "kube-api-access-fzmkq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.647936 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts" (OuterVolumeSpecName: "scripts") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.649477 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.651014 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741519 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741594 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-sb\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741619 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741668 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741772 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkn8g\" (UniqueName: \"kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741816 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc 
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741865 4664 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.741890 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-scripts\") on node \"crc\" DevicePath \"\""
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.756287 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.761553 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data" (OuterVolumeSpecName: "config-data") pod "0b2b826f-7b2a-4de6-9f80-7c854b988a67" (UID: "0b2b826f-7b2a-4de6-9f80-7c854b988a67"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.765303 4664 generic.go:334] "Generic (PLEG): container finished" podID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerID="51601bdca4f7d778209c81f850c87649d1cb6bfc7959dddcc21d5b46b7c5c604" exitCode=2
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.765333 4664 generic.go:334] "Generic (PLEG): container finished" podID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerID="0bad5cc6c81cb37888da1bd0712f04802297fd0a68a2dfb49c78826e80a749b2" exitCode=0
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.765379 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerDied","Data":"51601bdca4f7d778209c81f850c87649d1cb6bfc7959dddcc21d5b46b7c5c604"}
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.765408 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerDied","Data":"0bad5cc6c81cb37888da1bd0712f04802297fd0a68a2dfb49c78826e80a749b2"}
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.774240 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b55d75d57-5ft48" event={"ID":"48a7c25d-db6e-46dd-8b73-c6269b23cb5e","Type":"ContainerStarted","Data":"3ac857e36ef57e3758b21959a2f386481995a96237b61550a2639a4784e9c534"}
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.781699 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jbdfp" event={"ID":"0b2b826f-7b2a-4de6-9f80-7c854b988a67","Type":"ContainerDied","Data":"6374d70dab4441c07cffed87f340fc697ee18aebf96291a5b3e94813b9bbea84"}
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.781743 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6374d70dab4441c07cffed87f340fc697ee18aebf96291a5b3e94813b9bbea84"
containerID="6374d70dab4441c07cffed87f340fc697ee18aebf96291a5b3e94813b9bbea84" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.783917 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jbdfp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.846846 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.846897 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847039 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkn8g\" (UniqueName: \"kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847066 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847089 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847142 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-sb\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847193 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.847205 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b2b826f-7b2a-4de6-9f80-7c854b988a67-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.848694 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: 
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.850676 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.854575 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.862018 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.892649 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkn8g\" (UniqueName: \"kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g\") pod \"dnsmasq-dns-6d7bfc9c85-6smx8\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.982181 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5b55d75d57-5ft48" podStartSLOduration=4.061799153 podStartE2EDuration="14.982161348s" podCreationTimestamp="2025-10-13 07:04:19 +0000 UTC" firstStartedPulling="2025-10-13 07:04:20.874609961 +0000 UTC m=+1068.562055153" lastFinishedPulling="2025-10-13 07:04:31.794972156 +0000 UTC m=+1079.482417348" observedRunningTime="2025-10-13 07:04:33.801758435 +0000 UTC m=+1081.489203647" watchObservedRunningTime="2025-10-13 07:04:33.982161348 +0000 UTC m=+1081.669606540"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.987895 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:33.989295 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.020506 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sqpcs" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.020693 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.021276 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.021388 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.023376 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.054966 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.055009 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.055046 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.055123 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.055165 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg8xw\" (UniqueName: \"kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.055240 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.079931 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157038 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157137 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg8xw\" (UniqueName: \"kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157206 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157251 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.157266 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.162099 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.171543 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.173009 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.190735 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.190916 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.215983 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.217364 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.221408 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.233186 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.233784 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.296627 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg8xw\" (UniqueName: \"kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.305304 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.321605 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365831 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365886 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365909 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365935 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365952 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.365972 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366013 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366032 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g68jh\" (UniqueName: \"kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366075 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366097 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366112 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366136 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.366153 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4q5s\" (UniqueName: \"kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.370704 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.417893 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.424555 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64c58ff495-zb76j"] Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.436181 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468277 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468326 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468356 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468405 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468429 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g68jh\" (UniqueName: \"kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468468 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468491 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468507 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468530 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468545 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4q5s\" (UniqueName: \"kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468603 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468642 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.468660 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.469965 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.470572 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.471149 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.471193 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.471533 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.472169 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.473815 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.475212 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.478496 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.487471 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.490867 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g68jh\" (UniqueName: \"kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh\") pod \"dnsmasq-dns-56c55fd88c-shnsp\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.493415 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.493454 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4q5s\" (UniqueName: \"kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s\") pod \"cinder-api-0\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.638820 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.640844 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.646310 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.646823 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.647012 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rbmdn"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.682883 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.753225 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.760686 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.762918 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.764449 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.769782 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776339 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776438 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776464 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776481 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776543 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776589 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776655 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkd9x\" (UniqueName: \"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.776744 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.876216 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" event={"ID":"75f2bbf2-f367-4513-bb39-e403007183a8","Type":"ContainerStarted","Data":"8e7ffab74b5df96fed0e9bd8af194e28b3a75c855c57f7410cb6f545b3721379"}
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.878912 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.878965 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.878993 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879031 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879089 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879141 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkd9x\" (UniqueName: \"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
\"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879176 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879207 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879246 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879275 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879304 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879328 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879350 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.879377 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vtkw\" (UniqueName: \"kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.880021 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.888727 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.890442 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.915036 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b55d75d57-5ft48" event={"ID":"48a7c25d-db6e-46dd-8b73-c6269b23cb5e","Type":"ContainerStarted","Data":"e6bbba80634d77e5b833495e967c8704aa018479e2403d283f01fc011ae3188a"} Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.924662 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.932488 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-75bf6d8446-kj4kf" podStartSLOduration=4.7710848630000005 podStartE2EDuration="15.932467089s" podCreationTimestamp="2025-10-13 07:04:19 +0000 UTC" firstStartedPulling="2025-10-13 07:04:20.620458899 +0000 UTC m=+1068.307904081" lastFinishedPulling="2025-10-13 07:04:31.781841115 +0000 UTC m=+1079.469286307" observedRunningTime="2025-10-13 07:04:34.923016226 +0000 UTC m=+1082.610461418" watchObservedRunningTime="2025-10-13 07:04:34.932467089 +0000 UTC m=+1082.619912271" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.940865 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.956817 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:34 crc kubenswrapper[4664]: I1013 07:04:34.957430 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkd9x\" (UniqueName: \"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994565 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994638 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994661 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994688 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vtkw\" (UniqueName: \"kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994942 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994964 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.994995 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.995475 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:34.995673 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.007680 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.048730 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.053938 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.100145 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: W1013 07:04:35.100304 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d623182_6f91_459d_81fa_dbb5e6985769.slice/crio-e4f3ccfa1069b5178a4b79e3befc0068328b46c35f890fa69aa7b308878af135 WatchSource:0}: Error finding container e4f3ccfa1069b5178a4b79e3befc0068328b46c35f890fa69aa7b308878af135: Status 404 returned error can't find the container with id e4f3ccfa1069b5178a4b79e3befc0068328b46c35f890fa69aa7b308878af135
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.135861 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.205047 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vtkw\" (UniqueName: \"kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.217871 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.222338 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45e17674-0bf2-491b-8c3b-73b83dcfc3d8" path="/var/lib/kubelet/pods/45e17674-0bf2-491b-8c3b-73b83dcfc3d8/volumes"
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.223382 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"]
Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.223419 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.298312 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.433671 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.553307 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.801393 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.954184 4664 generic.go:334] "Generic (PLEG): container finished" podID="9d623182-6f91-459d-81fa-dbb5e6985769" containerID="ab36384d654f9757806192d8621abab3a00decd905017db924c321a1417595d4" exitCode=0 Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.954263 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" event={"ID":"9d623182-6f91-459d-81fa-dbb5e6985769","Type":"ContainerDied","Data":"ab36384d654f9757806192d8621abab3a00decd905017db924c321a1417595d4"} Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.954290 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" event={"ID":"9d623182-6f91-459d-81fa-dbb5e6985769","Type":"ContainerStarted","Data":"e4f3ccfa1069b5178a4b79e3befc0068328b46c35f890fa69aa7b308878af135"} Oct 13 07:04:35 crc kubenswrapper[4664]: I1013 07:04:35.985080 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" event={"ID":"f450f736-0236-4e48-b810-e09e43815bc9","Type":"ContainerStarted","Data":"80a5040c576a65a37ad4a9bc641037ae53716298957e5eceb7d3ace5b95d338c"} Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.006772 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerStarted","Data":"4f274253c3d9f7b6a2817513269d86be77e515a7420cafdab4f5e0a5ae6744ec"} Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.318619 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.470344 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.571817 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716390 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716513 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkn8g\" (UniqueName: \"kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716534 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716698 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716738 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.716759 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-sb\") pod \"9d623182-6f91-459d-81fa-dbb5e6985769\" (UID: \"9d623182-6f91-459d-81fa-dbb5e6985769\") " Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.793837 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g" (OuterVolumeSpecName: "kube-api-access-rkn8g") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "kube-api-access-rkn8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.819628 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkn8g\" (UniqueName: \"kubernetes.io/projected/9d623182-6f91-459d-81fa-dbb5e6985769-kube-api-access-rkn8g\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.861472 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.920858 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.921090 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.935582 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.962275 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config" (OuterVolumeSpecName: "config") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.978294 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:36 crc kubenswrapper[4664]: I1013 07:04:36.984683 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9d623182-6f91-459d-81fa-dbb5e6985769" (UID: "9d623182-6f91-459d-81fa-dbb5e6985769"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.022817 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.022847 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.022858 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.022866 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d623182-6f91-459d-81fa-dbb5e6985769-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.041069 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.065334 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerStarted","Data":"1e98d4df2655e670be7ad8a546a8430ca04ca0c5e4fcec5fb1ac588f20d25471"} Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.073978 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d7bfc9c85-6smx8" event={"ID":"9d623182-6f91-459d-81fa-dbb5e6985769","Type":"ContainerDied","Data":"e4f3ccfa1069b5178a4b79e3befc0068328b46c35f890fa69aa7b308878af135"} Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.074031 4664 scope.go:117] "RemoveContainer" containerID="ab36384d654f9757806192d8621abab3a00decd905017db924c321a1417595d4" Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.074158 4664 util.go:48] "No ready sandbox for pod can be found. 
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.085572 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerStarted","Data":"aa1f57cef6f07aea030d92be113ddb0967c58cd6798a74fb79c979572e1ac110"}
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.137981 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerStarted","Data":"41774e94d3ba6f3e960ac78c61ec3c367433e6d898a5c2468d9ea211dd4efea7"}
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.241912 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"]
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.279258 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d7bfc9c85-6smx8"]
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.334448 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-b7789cdc8-2t2mz"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.410128 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.451123 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.749908 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.769475 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.769598 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-79f9447d46-mnb2r" podUID="576e5bbc-aadf-4530-9a7f-bcf8401874a6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.163:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:37 crc kubenswrapper[4664]: I1013 07:04:37.871908 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused"
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.063878 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d623182-6f91-459d-81fa-dbb5e6985769" path="/var/lib/kubelet/pods/9d623182-6f91-459d-81fa-dbb5e6985769/volumes"
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.216293 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerStarted","Data":"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a"}
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.232286 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerStarted","Data":"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e"}
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.254073 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerStarted","Data":"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae"}
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.279930 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerStarted","Data":"eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41"}
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.290283 4664 generic.go:334] "Generic (PLEG): container finished" podID="f450f736-0236-4e48-b810-e09e43815bc9" containerID="150e68804f6b155dc19a6939cc5d754ecb3ee9ea75baf72a5677cb35915e9dc8" exitCode=0
Oct 13 07:04:39 crc kubenswrapper[4664]: I1013 07:04:39.290382 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" event={"ID":"f450f736-0236-4e48-b810-e09e43815bc9","Type":"ContainerDied","Data":"150e68804f6b155dc19a6939cc5d754ecb3ee9ea75baf72a5677cb35915e9dc8"}
Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.078052 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.189192 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-79f9447d46-mnb2r" podUID="576e5bbc-aadf-4530-9a7f-bcf8401874a6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.163:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.241022 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.340445 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerStarted","Data":"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be"}
Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.340620 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-log" containerID="cri-o://a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" gracePeriod=30
podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-log" containerID="cri-o://a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" gracePeriod=30 Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.341127 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-httpd" containerID="cri-o://e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" gracePeriod=30 Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.363688 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerStarted","Data":"e0cf0130f5607e4dc9e2b84b8aadf95a02db89c1a6f227ecc57f77553025c353"} Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.364249 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.383054 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.3830376189999996 podStartE2EDuration="7.383037619s" podCreationTimestamp="2025-10-13 07:04:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:40.379952786 +0000 UTC m=+1088.067397998" watchObservedRunningTime="2025-10-13 07:04:40.383037619 +0000 UTC m=+1088.070482811" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.389961 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" event={"ID":"f450f736-0236-4e48-b810-e09e43815bc9","Type":"ContainerStarted","Data":"df54618c28d4adf7d70187369bec6d5e5cf73b1758c53ca0c2b4938ffc18a235"} Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.390252 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.407489 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerStarted","Data":"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46"} Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.415377 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.415361573 podStartE2EDuration="6.415361573s" podCreationTimestamp="2025-10-13 07:04:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:40.409078495 +0000 UTC m=+1088.096523687" watchObservedRunningTime="2025-10-13 07:04:40.415361573 +0000 UTC m=+1088.102806765" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.423141 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerStarted","Data":"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77"} Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.423309 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-log" 
containerID="cri-o://649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" gracePeriod=30 Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.423634 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-httpd" containerID="cri-o://f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" gracePeriod=30 Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.485554 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" podStartSLOduration=6.485540489 podStartE2EDuration="6.485540489s" podCreationTimestamp="2025-10-13 07:04:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:40.483201826 +0000 UTC m=+1088.170647018" watchObservedRunningTime="2025-10-13 07:04:40.485540489 +0000 UTC m=+1088.172985681" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.493014 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.619526871 podStartE2EDuration="7.493004438s" podCreationTimestamp="2025-10-13 07:04:33 +0000 UTC" firstStartedPulling="2025-10-13 07:04:35.267236547 +0000 UTC m=+1082.954681749" lastFinishedPulling="2025-10-13 07:04:36.140714134 +0000 UTC m=+1083.828159316" observedRunningTime="2025-10-13 07:04:40.44517363 +0000 UTC m=+1088.132618832" watchObservedRunningTime="2025-10-13 07:04:40.493004438 +0000 UTC m=+1088.180449630" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.496122 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-757f4d5bc7-72d99" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.572997 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.5729800350000005 podStartE2EDuration="7.572980035s" podCreationTimestamp="2025-10-13 07:04:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:40.518951012 +0000 UTC m=+1088.206396204" watchObservedRunningTime="2025-10-13 07:04:40.572980035 +0000 UTC m=+1088.260425227" Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.615355 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.615584 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b7789cdc8-2t2mz" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-api" containerID="cri-o://810b3204424935fa735cbf37a668d73b3a982f147986947fa806549e6c51fe64" gracePeriod=30 Oct 13 07:04:40 crc kubenswrapper[4664]: I1013 07:04:40.615997 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b7789cdc8-2t2mz" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-httpd" containerID="cri-o://a02de48f31d1eb5dff6f95bfd76ac165fbeefd66d8e68acae06e902c9a28947f" gracePeriod=30 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.350302 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.388138 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403005 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403110 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403155 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403193 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403237 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403255 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403292 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403311 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkd9x\" (UniqueName: \"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403346 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403371 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403423 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403447 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vtkw\" (UniqueName: \"kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403497 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts\") pod \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\" (UID: \"e9f3711f-0b9b-4f63-b21c-c9333ecb8902\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.403513 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle\") pod \"4a55d605-35c5-47f4-8264-c27620f4732f\" (UID: \"4a55d605-35c5-47f4-8264-c27620f4732f\") " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.408468 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.408894 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs" (OuterVolumeSpecName: "logs") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.409137 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.409329 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs" (OuterVolumeSpecName: "logs") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.442097 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). 
InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.444179 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts" (OuterVolumeSpecName: "scripts") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.444505 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw" (OuterVolumeSpecName: "kube-api-access-8vtkw") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "kube-api-access-8vtkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.446098 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x" (OuterVolumeSpecName: "kube-api-access-bkd9x") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "kube-api-access-bkd9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.469973 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts" (OuterVolumeSpecName: "scripts") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.473175 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerID="a02de48f31d1eb5dff6f95bfd76ac165fbeefd66d8e68acae06e902c9a28947f" exitCode=0 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.473240 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerDied","Data":"a02de48f31d1eb5dff6f95bfd76ac165fbeefd66d8e68acae06e902c9a28947f"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.476078 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489201 4664 generic.go:334] "Generic (PLEG): container finished" podID="4a55d605-35c5-47f4-8264-c27620f4732f" containerID="f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" exitCode=143 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489238 4664 generic.go:334] "Generic (PLEG): container finished" podID="4a55d605-35c5-47f4-8264-c27620f4732f" containerID="649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" exitCode=143 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489288 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerDied","Data":"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489315 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerDied","Data":"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4a55d605-35c5-47f4-8264-c27620f4732f","Type":"ContainerDied","Data":"aa1f57cef6f07aea030d92be113ddb0967c58cd6798a74fb79c979572e1ac110"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489339 4664 scope.go:117] "RemoveContainer" containerID="f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.489462 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.500970 4664 generic.go:334] "Generic (PLEG): container finished" podID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerID="e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" exitCode=143 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.501008 4664 generic.go:334] "Generic (PLEG): container finished" podID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerID="a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" exitCode=143 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.502431 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.506329 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerDied","Data":"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.506466 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerDied","Data":"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.507124 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9f3711f-0b9b-4f63-b21c-c9333ecb8902","Type":"ContainerDied","Data":"41774e94d3ba6f3e960ac78c61ec3c367433e6d898a5c2468d9ea211dd4efea7"} Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.507554 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api-log" containerID="cri-o://eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41" gracePeriod=30 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.508279 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api" containerID="cri-o://e0cf0130f5607e4dc9e2b84b8aadf95a02db89c1a6f227ecc57f77553025c353" gracePeriod=30 Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525249 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525461 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vtkw\" (UniqueName: \"kubernetes.io/projected/4a55d605-35c5-47f4-8264-c27620f4732f-kube-api-access-8vtkw\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525483 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525492 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525500 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525519 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525528 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525535 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a55d605-35c5-47f4-8264-c27620f4732f-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525543 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525550 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525558 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkd9x\" (UniqueName: \"kubernetes.io/projected/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-kube-api-access-bkd9x\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.525573 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.583445 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.599070 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.614714 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.619747 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data" (OuterVolumeSpecName: "config-data") pod "e9f3711f-0b9b-4f63-b21c-c9333ecb8902" (UID: "e9f3711f-0b9b-4f63-b21c-c9333ecb8902"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.627062 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.627100 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.627115 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f3711f-0b9b-4f63-b21c-c9333ecb8902-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.627126 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.708205 4664 scope.go:117] "RemoveContainer" containerID="649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.716483 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data" (OuterVolumeSpecName: "config-data") pod "4a55d605-35c5-47f4-8264-c27620f4732f" (UID: "4a55d605-35c5-47f4-8264-c27620f4732f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.729174 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a55d605-35c5-47f4-8264-c27620f4732f-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.763570 4664 scope.go:117] "RemoveContainer" containerID="f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.764894 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77\": container with ID starting with f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77 not found: ID does not exist" containerID="f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.765021 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77"} err="failed to get container status \"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77\": rpc error: code = NotFound desc = could not find container \"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77\": container with ID starting with f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77 not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.765154 4664 scope.go:117] "RemoveContainer" containerID="649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.768077 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e\": container with ID starting with 649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e not found: ID does not exist" containerID="649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.768125 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e"} err="failed to get container status \"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e\": rpc error: code = NotFound desc = could not find container \"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e\": container with ID starting with 649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.768153 4664 scope.go:117] "RemoveContainer" containerID="f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.769027 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77"} err="failed to get container status \"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77\": rpc error: code = NotFound desc = could not find container \"f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77\": container with ID starting with f3282c52b1b7f1ca8d65faca39cc565c0f880cf7c6cbb5af8d34b14af8067b77 not found: ID does not 
exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.769064 4664 scope.go:117] "RemoveContainer" containerID="649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.769408 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e"} err="failed to get container status \"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e\": rpc error: code = NotFound desc = could not find container \"649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e\": container with ID starting with 649af319a78bd049acc121932164a156c766d7a2ba86e86ff32e1266e76d313e not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.769426 4664 scope.go:117] "RemoveContainer" containerID="e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.789892 4664 scope.go:117] "RemoveContainer" containerID="a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.806606 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d4bdd7d_b693_4c99_a5bb_9d0a78ba3cd2.slice/crio-eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d4bdd7d_b693_4c99_a5bb_9d0a78ba3cd2.slice/crio-conmon-eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41.scope\": RecentStats: unable to find data in memory cache]" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.828415 4664 scope.go:117] "RemoveContainer" containerID="e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.831348 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be\": container with ID starting with e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be not found: ID does not exist" containerID="e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.831395 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be"} err="failed to get container status \"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be\": rpc error: code = NotFound desc = could not find container \"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be\": container with ID starting with e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.831423 4664 scope.go:117] "RemoveContainer" containerID="a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.833690 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae\": container with ID starting with 
a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae not found: ID does not exist" containerID="a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.833720 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae"} err="failed to get container status \"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae\": rpc error: code = NotFound desc = could not find container \"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae\": container with ID starting with a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.833736 4664 scope.go:117] "RemoveContainer" containerID="e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.836864 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be"} err="failed to get container status \"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be\": rpc error: code = NotFound desc = could not find container \"e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be\": container with ID starting with e435386c4c47f6d739c33cdfc517ef9a0bba951719006015d3893b83e30df3be not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.836890 4664 scope.go:117] "RemoveContainer" containerID="a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.847177 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae"} err="failed to get container status \"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae\": rpc error: code = NotFound desc = could not find container \"a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae\": container with ID starting with a38110e1e44e2b84b5b1e5de86cf5255febe6e38c703736b263e1e163a3809ae not found: ID does not exist" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.920350 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.926397 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.940276 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.950655 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957206 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.957533 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-log" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957548 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-log" Oct 13 07:04:41 crc 
kubenswrapper[4664]: E1013 07:04:41.957560 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d623182-6f91-459d-81fa-dbb5e6985769" containerName="init" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957567 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d623182-6f91-459d-81fa-dbb5e6985769" containerName="init" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.957581 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-log" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957587 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-log" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.957598 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957604 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: E1013 07:04:41.957637 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957643 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957806 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-log" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957821 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957834 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" containerName="glance-log" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957848 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d623182-6f91-459d-81fa-dbb5e6985769" containerName="init" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.957861 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" containerName="glance-httpd" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.958765 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.966079 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.966278 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rbmdn" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.966548 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.966655 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 13 07:04:41 crc kubenswrapper[4664]: I1013 07:04:41.988805 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.020761 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.023725 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.050551 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.051288 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.104895 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170180 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170504 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170595 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170667 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170750 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170854 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctp88\" (UniqueName: \"kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.170925 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171028 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171123 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171206 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171281 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171350 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171433 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171513 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171603 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.171680 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62chj\" (UniqueName: \"kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.206679 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273721 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273762 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273787 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273842 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctp88\" (UniqueName: \"kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273859 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273891 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273929 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273949 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273966 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273979 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.273998 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274018 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274039 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274062 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62chj\" (UniqueName: \"kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274083 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 
07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274123 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274750 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.274922 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.275216 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.276164 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.277271 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.277600 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.284434 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.287997 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.290535 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.290780 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.294580 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.295012 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.298571 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctp88\" (UniqueName: \"kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.298893 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.302921 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.327396 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62chj\" (UniqueName: \"kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.329118 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.332960 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.403651 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.524960 4664 generic.go:334] "Generic (PLEG): container finished" podID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerID="e0cf0130f5607e4dc9e2b84b8aadf95a02db89c1a6f227ecc57f77553025c353" exitCode=0 Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.524989 4664 generic.go:334] "Generic (PLEG): container finished" podID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerID="eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41" exitCode=143 Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.525042 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerDied","Data":"e0cf0130f5607e4dc9e2b84b8aadf95a02db89c1a6f227ecc57f77553025c353"} Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.525063 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerDied","Data":"eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41"} Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.598202 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.760585 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.779919 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-79f9447d46-mnb2r" podUID="576e5bbc-aadf-4530-9a7f-bcf8401874a6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.163:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897124 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897205 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897242 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4q5s\" (UniqueName: \"kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897263 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data\") pod 
\"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897329 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897391 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id\") pod \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\" (UID: \"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2\") " Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.897985 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.899906 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs" (OuterVolumeSpecName: "logs") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.913918 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.918642 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s" (OuterVolumeSpecName: "kube-api-access-k4q5s") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "kube-api-access-k4q5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.918733 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts" (OuterVolumeSpecName: "scripts") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.968943 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999421 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999452 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999461 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4q5s\" (UniqueName: \"kubernetes.io/projected/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-kube-api-access-k4q5s\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999471 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999480 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:42 crc kubenswrapper[4664]: I1013 07:04:42.999489 4664 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.064904 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data" (OuterVolumeSpecName: "config-data") pod "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" (UID: "2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.083114 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a55d605-35c5-47f4-8264-c27620f4732f" path="/var/lib/kubelet/pods/4a55d605-35c5-47f4-8264-c27620f4732f/volumes" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.093141 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9f3711f-0b9b-4f63-b21c-c9333ecb8902" path="/var/lib/kubelet/pods/e9f3711f-0b9b-4f63-b21c-c9333ecb8902/volumes" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.102890 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.157851 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.385149 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.546815 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2","Type":"ContainerDied","Data":"1e98d4df2655e670be7ad8a546a8430ca04ca0c5e4fcec5fb1ac588f20d25471"} Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.546855 4664 scope.go:117] "RemoveContainer" containerID="e0cf0130f5607e4dc9e2b84b8aadf95a02db89c1a6f227ecc57f77553025c353" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.546953 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.556511 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerStarted","Data":"10fb23438a565e9f20f57a2cfcf175423c3fce26f4ae99148247308904e48bff"} Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.576712 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.577644 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerStarted","Data":"cc7d0b814a543ba991cb3017f52a08b0f0b680ef033863b3be27cd4f8347394d"} Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.589200 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.602319 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: E1013 07:04:43.602665 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api-log" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.602681 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api-log" Oct 13 07:04:43 crc kubenswrapper[4664]: E1013 07:04:43.602716 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.602723 4664 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.604157 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api-log" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.604201 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" containerName="cinder-api" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.605286 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.615191 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.615421 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.615521 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.629321 4664 scope.go:117] "RemoveContainer" containerID="eb85e01702cc6fb245292c23300c46a0eb6d56860c3ae320f8c73478c42b7c41" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.660825 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713283 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f726ffb-02b8-4628-b128-1422badcf6ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713510 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713608 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmmfl\" (UniqueName: \"kubernetes.io/projected/2f726ffb-02b8-4628-b128-1422badcf6ae-kube-api-access-kmmfl\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713673 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713737 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713873 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f726ffb-02b8-4628-b128-1422badcf6ae-logs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.713954 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.714021 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-scripts\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.714105 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.816192 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.816460 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f726ffb-02b8-4628-b128-1422badcf6ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.816562 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.816657 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmmfl\" (UniqueName: \"kubernetes.io/projected/2f726ffb-02b8-4628-b128-1422badcf6ae-kube-api-access-kmmfl\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.816739 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.817013 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.817108 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f726ffb-02b8-4628-b128-1422badcf6ae-logs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.817187 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.817255 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-scripts\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.818889 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f726ffb-02b8-4628-b128-1422badcf6ae-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.834377 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f726ffb-02b8-4628-b128-1422badcf6ae-logs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.838662 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.838938 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmmfl\" (UniqueName: \"kubernetes.io/projected/2f726ffb-02b8-4628-b128-1422badcf6ae-kube-api-access-kmmfl\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.839306 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-scripts\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.839562 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.846143 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-config-data\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.847997 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.854362 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f726ffb-02b8-4628-b128-1422badcf6ae-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2f726ffb-02b8-4628-b128-1422badcf6ae\") " pod="openstack/cinder-api-0" Oct 13 07:04:43 crc kubenswrapper[4664]: I1013 07:04:43.942233 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.436876 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.590474 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.625094 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerStarted","Data":"855fa1e1e273a560a58855113698758f8ae69c8f0fd44854faa09d3258c67c51"} Oct 13 07:04:44 crc kubenswrapper[4664]: W1013 07:04:44.638436 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f726ffb_02b8_4628_b128_1422badcf6ae.slice/crio-ad9f11412f6d28c72f09680807737f39e0868beefb629f6cb6a55450db3ea5e8 WatchSource:0}: Error finding container ad9f11412f6d28c72f09680807737f39e0868beefb629f6cb6a55450db3ea5e8: Status 404 returned error can't find the container with id ad9f11412f6d28c72f09680807737f39e0868beefb629f6cb6a55450db3ea5e8 Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.638589 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerID="810b3204424935fa735cbf37a668d73b3a982f147986947fa806549e6c51fe64" exitCode=0 Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.639054 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerDied","Data":"810b3204424935fa735cbf37a668d73b3a982f147986947fa806549e6c51fe64"} Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.643780 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerStarted","Data":"ff665f3fc9ab4ed4b95c7d9e6e27533265c4c868ec292889fd035852a37009a4"} Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.763036 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.829250 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.853361 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:44 crc kubenswrapper[4664]: I1013 07:04:44.853605 4664 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="dnsmasq-dns" containerID="cri-o://27beb3d557fd1cee8bf4feee30917e63015a5e2132eab765bd78b2d83572b6cf" gracePeriod=10 Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.015748 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.022880 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.161:5353: connect: connection refused" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.062960 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config\") pod \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.063031 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle\") pod \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.063092 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs\") pod \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.063114 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config\") pod \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.063311 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc7qz\" (UniqueName: \"kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz\") pod \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\" (UID: \"bcccda30-71c6-4461-8d5f-545fa60cb9ab\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.081685 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz" (OuterVolumeSpecName: "kube-api-access-dc7qz") pod "bcccda30-71c6-4461-8d5f-545fa60cb9ab" (UID: "bcccda30-71c6-4461-8d5f-545fa60cb9ab"). InnerVolumeSpecName "kube-api-access-dc7qz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.083019 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "bcccda30-71c6-4461-8d5f-545fa60cb9ab" (UID: "bcccda30-71c6-4461-8d5f-545fa60cb9ab"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.126002 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2" path="/var/lib/kubelet/pods/2d4bdd7d-b693-4c99-a5bb-9d0a78ba3cd2/volumes" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.165603 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.165632 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc7qz\" (UniqueName: \"kubernetes.io/projected/bcccda30-71c6-4461-8d5f-545fa60cb9ab-kube-api-access-dc7qz\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.203307 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config" (OuterVolumeSpecName: "config") pod "bcccda30-71c6-4461-8d5f-545fa60cb9ab" (UID: "bcccda30-71c6-4461-8d5f-545fa60cb9ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.265316 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bcccda30-71c6-4461-8d5f-545fa60cb9ab" (UID: "bcccda30-71c6-4461-8d5f-545fa60cb9ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.268432 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.268465 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.390991 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "bcccda30-71c6-4461-8d5f-545fa60cb9ab" (UID: "bcccda30-71c6-4461-8d5f-545fa60cb9ab"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.474872 4664 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcccda30-71c6-4461-8d5f-545fa60cb9ab-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.693228 4664 generic.go:334] "Generic (PLEG): container finished" podID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerID="27beb3d557fd1cee8bf4feee30917e63015a5e2132eab765bd78b2d83572b6cf" exitCode=0 Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.693295 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" event={"ID":"4fa11d17-6316-4c38-88e0-a7a5c293b9d1","Type":"ContainerDied","Data":"27beb3d557fd1cee8bf4feee30917e63015a5e2132eab765bd78b2d83572b6cf"} Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.697188 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f726ffb-02b8-4628-b128-1422badcf6ae","Type":"ContainerStarted","Data":"ad9f11412f6d28c72f09680807737f39e0868beefb629f6cb6a55450db3ea5e8"} Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.698517 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b7789cdc8-2t2mz" event={"ID":"bcccda30-71c6-4461-8d5f-545fa60cb9ab","Type":"ContainerDied","Data":"175799aa812484a69ce3c2e0b45de95e91600a27146ea8c8bf1da8a7cc14cf8a"} Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.698540 4664 scope.go:117] "RemoveContainer" containerID="a02de48f31d1eb5dff6f95bfd76ac165fbeefd66d8e68acae06e902c9a28947f" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.698656 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b7789cdc8-2t2mz" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.793618 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.806896 4664 scope.go:117] "RemoveContainer" containerID="810b3204424935fa735cbf37a668d73b3a982f147986947fa806549e6c51fe64" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.832222 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.849103 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b7789cdc8-2t2mz"] Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889616 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889702 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889742 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889770 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889838 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfc9r\" (UniqueName: \"kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.889968 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb\") pod \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\" (UID: \"4fa11d17-6316-4c38-88e0-a7a5c293b9d1\") " Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.906102 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r" (OuterVolumeSpecName: "kube-api-access-gfc9r") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "kube-api-access-gfc9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.983573 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.993328 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config" (OuterVolumeSpecName: "config") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.995250 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.995391 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.995404 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.995415 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfc9r\" (UniqueName: \"kubernetes.io/projected/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-kube-api-access-gfc9r\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:45 crc kubenswrapper[4664]: I1013 07:04:45.995425 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.026951 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.038235 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fa11d17-6316-4c38-88e0-a7a5c293b9d1" (UID: "4fa11d17-6316-4c38-88e0-a7a5c293b9d1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.097350 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.097556 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fa11d17-6316-4c38-88e0-a7a5c293b9d1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.206050 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-79f9447d46-mnb2r" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.270962 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.271390 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" containerID="cri-o://b5776d7a67e6f0092c9361b5c40ec63d9ad10dd6ea73c6047b94807e0dd74f39" gracePeriod=30 Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.271744 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" containerID="cri-o://0c7c10d42ac82800460d5083107bad03d64f54e32a7163a2e1af4842853d3b60" gracePeriod=30 Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.718717 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f726ffb-02b8-4628-b128-1422badcf6ae","Type":"ContainerStarted","Data":"e5755fc7dfa0496ec087cd040624e7a442f4ba887d2d5f908ae0d7aebfe87a3f"} Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.735376 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerStarted","Data":"0057dffe25092120226f3568a6fa3979743a1a3670deb37d0c927a082a6587f7"} Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.744429 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" event={"ID":"4fa11d17-6316-4c38-88e0-a7a5c293b9d1","Type":"ContainerDied","Data":"c516d7d4fe916b3dfddb274773ccc8c3c7b7159039c6d91102b5b10736389a0b"} Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.744496 4664 scope.go:117] "RemoveContainer" containerID="27beb3d557fd1cee8bf4feee30917e63015a5e2132eab765bd78b2d83572b6cf" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.744498 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65f7b65d4c-fmz64" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.770037 4664 generic.go:334] "Generic (PLEG): container finished" podID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerID="b5776d7a67e6f0092c9361b5c40ec63d9ad10dd6ea73c6047b94807e0dd74f39" exitCode=143 Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.770139 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerDied","Data":"b5776d7a67e6f0092c9361b5c40ec63d9ad10dd6ea73c6047b94807e0dd74f39"} Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.783033 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.783016306 podStartE2EDuration="5.783016306s" podCreationTimestamp="2025-10-13 07:04:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:46.770381238 +0000 UTC m=+1094.457826430" watchObservedRunningTime="2025-10-13 07:04:46.783016306 +0000 UTC m=+1094.470461498" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.789123 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerStarted","Data":"764ed53a91604c3eb760ad3034214b347899c557047289adf49dfd661cd1e6f2"} Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.832933 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.832910859 podStartE2EDuration="5.832910859s" podCreationTimestamp="2025-10-13 07:04:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:46.818250148 +0000 UTC m=+1094.505695350" watchObservedRunningTime="2025-10-13 07:04:46.832910859 +0000 UTC m=+1094.520356051" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.839875 4664 scope.go:117] "RemoveContainer" containerID="c031c8becf6a1b66bbc7eb416253522dd40a6c91b4f52dca0f7a104741deabf5" Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.854110 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:46 crc kubenswrapper[4664]: I1013 07:04:46.898266 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65f7b65d4c-fmz64"] Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.059838 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" path="/var/lib/kubelet/pods/4fa11d17-6316-4c38-88e0-a7a5c293b9d1/volumes" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.060611 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" path="/var/lib/kubelet/pods/bcccda30-71c6-4461-8d5f-545fa60cb9ab/volumes" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.739589 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 
07:04:47.739936 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.740647 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0"} pod="openstack/horizon-8487d6c5d4-cgnm9" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.740677 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" containerID="cri-o://edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0" gracePeriod=30 Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.814513 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f726ffb-02b8-4628-b128-1422badcf6ae","Type":"ContainerStarted","Data":"7f22a2a24302edd8a9aa10db9f6dfca1b22418e9d8119eaf5e349f5331cd1087"} Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.814658 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.838525 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.838508178 podStartE2EDuration="4.838508178s" podCreationTimestamp="2025-10-13 07:04:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:47.830696029 +0000 UTC m=+1095.518141231" watchObservedRunningTime="2025-10-13 07:04:47.838508178 +0000 UTC m=+1095.525953370" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.872032 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.872374 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.873077 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b"} pod="openstack/horizon-7d78c558d-rjg4v" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:04:47 crc kubenswrapper[4664]: I1013 07:04:47.873180 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" containerID="cri-o://6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b" gracePeriod=30 Oct 13 07:04:48 crc kubenswrapper[4664]: I1013 07:04:48.826228 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.339887 4664 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.341314 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5b885874bd-tzm67" Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.494223 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.545407 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.848956 4664 generic.go:334] "Generic (PLEG): container finished" podID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerID="0c7c10d42ac82800460d5083107bad03d64f54e32a7163a2e1af4842853d3b60" exitCode=0 Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.849155 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="cinder-scheduler" containerID="cri-o://24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a" gracePeriod=30 Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.849449 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerDied","Data":"0c7c10d42ac82800460d5083107bad03d64f54e32a7163a2e1af4842853d3b60"} Oct 13 07:04:49 crc kubenswrapper[4664]: I1013 07:04:49.849470 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="probe" containerID="cri-o://b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46" gracePeriod=30 Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.198553 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.223933 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data\") pod \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.223987 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs\") pod \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.224011 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2zwx\" (UniqueName: \"kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx\") pod \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.224103 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle\") pod \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.224164 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom\") pod \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\" (UID: \"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5\") " Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.225910 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs" (OuterVolumeSpecName: "logs") pod "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" (UID: "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.242120 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" (UID: "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.284472 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx" (OuterVolumeSpecName: "kube-api-access-m2zwx") pod "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" (UID: "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5"). InnerVolumeSpecName "kube-api-access-m2zwx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.309150 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6559fb6d89-x6txc" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.315247 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" (UID: "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.341243 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.341288 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.341299 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.341308 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2zwx\" (UniqueName: \"kubernetes.io/projected/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-kube-api-access-m2zwx\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.364223 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data" (OuterVolumeSpecName: "config-data") pod "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" (UID: "a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.442325 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.861190 4664 generic.go:334] "Generic (PLEG): container finished" podID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerID="b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46" exitCode=0 Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.861277 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerDied","Data":"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46"} Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.863746 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d67b46cc4-4zwj2" event={"ID":"a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5","Type":"ContainerDied","Data":"0c7161c7f8c9806e2440762a434e62070bafe65217dfc8a23ee5f7089e818e84"} Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.863808 4664 scope.go:117] "RemoveContainer" containerID="0c7c10d42ac82800460d5083107bad03d64f54e32a7163a2e1af4842853d3b60" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.863836 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6d67b46cc4-4zwj2" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.899236 4664 scope.go:117] "RemoveContainer" containerID="b5776d7a67e6f0092c9361b5c40ec63d9ad10dd6ea73c6047b94807e0dd74f39" Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.912099 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:50 crc kubenswrapper[4664]: I1013 07:04:50.921640 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6d67b46cc4-4zwj2"] Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.059309 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" path="/var/lib/kubelet/pods/a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5/volumes" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.503532 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504213 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-api" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504297 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-api" Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504380 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-httpd" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504433 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-httpd" Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504497 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504551 4664 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504619 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504685 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504748 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="dnsmasq-dns" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504821 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="dnsmasq-dns" Oct 13 07:04:51 crc kubenswrapper[4664]: E1013 07:04:51.504880 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="init" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.504939 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="init" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.505154 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-api" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.505221 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.505280 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa11d17-6316-4c38-88e0-a7a5c293b9d1" containerName="dnsmasq-dns" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.505343 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.505412 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcccda30-71c6-4461-8d5f-545fa60cb9ab" containerName="neutron-httpd" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.506099 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.509996 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.510045 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.510293 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4kk9b" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.516235 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.559752 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config-secret\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.559877 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvc64\" (UniqueName: \"kubernetes.io/projected/26880f4a-2c23-4107-86b6-937a82c2fcb1-kube-api-access-fvc64\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.560108 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.560200 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.662010 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvc64\" (UniqueName: \"kubernetes.io/projected/26880f4a-2c23-4107-86b6-937a82c2fcb1-kube-api-access-fvc64\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.662076 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.662102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.662177 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config-secret\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.664262 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.667753 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-openstack-config-secret\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.668414 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26880f4a-2c23-4107-86b6-937a82c2fcb1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.678526 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvc64\" (UniqueName: \"kubernetes.io/projected/26880f4a-2c23-4107-86b6-937a82c2fcb1-kube-api-access-fvc64\") pod \"openstackclient\" (UID: \"26880f4a-2c23-4107-86b6-937a82c2fcb1\") " pod="openstack/openstackclient" Oct 13 07:04:51 crc kubenswrapper[4664]: I1013 07:04:51.877093 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 13 07:04:52 crc kubenswrapper[4664]: E1013 07:04:52.143206 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4507fc8_d71b_40bd_bc28_e21d538fc081.slice/crio-24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4507fc8_d71b_40bd_bc28_e21d538fc081.slice/crio-conmon-24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a.scope\": RecentStats: unable to find data in memory cache]" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.377619 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 13 07:04:52 crc kubenswrapper[4664]: W1013 07:04:52.381271 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod26880f4a_2c23_4107_86b6_937a82c2fcb1.slice/crio-82cb0eb1adce371dd45439c6a564756fd0c235dec1fa415ffb64e75869d640fb WatchSource:0}: Error finding container 82cb0eb1adce371dd45439c6a564756fd0c235dec1fa415ffb64e75869d640fb: Status 404 returned error can't find the container with id 82cb0eb1adce371dd45439c6a564756fd0c235dec1fa415ffb64e75869d640fb Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.404405 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.405309 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.435610 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.454151 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.510311 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.600031 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.600081 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.639530 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.654195 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689007 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg8xw\" (UniqueName: \"kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689102 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689139 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689169 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689253 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.689360 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts\") pod \"f4507fc8-d71b-40bd-bc28-e21d538fc081\" (UID: \"f4507fc8-d71b-40bd-bc28-e21d538fc081\") " Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.690132 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.696361 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts" (OuterVolumeSpecName: "scripts") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.696363 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw" (OuterVolumeSpecName: "kube-api-access-qg8xw") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "kube-api-access-qg8xw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.698063 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.747423 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.790375 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data" (OuterVolumeSpecName: "config-data") pod "f4507fc8-d71b-40bd-bc28-e21d538fc081" (UID: "f4507fc8-d71b-40bd-bc28-e21d538fc081"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791862 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791885 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791895 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791905 4664 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4507fc8-d71b-40bd-bc28-e21d538fc081-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791913 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4507fc8-d71b-40bd-bc28-e21d538fc081-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.791921 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg8xw\" (UniqueName: \"kubernetes.io/projected/f4507fc8-d71b-40bd-bc28-e21d538fc081-kube-api-access-qg8xw\") on node \"crc\" DevicePath \"\"" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.910548 4664 generic.go:334] "Generic (PLEG): container finished" podID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerID="24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a" exitCode=0 Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.910608 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerDied","Data":"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a"} Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.910636 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f4507fc8-d71b-40bd-bc28-e21d538fc081","Type":"ContainerDied","Data":"4f274253c3d9f7b6a2817513269d86be77e515a7420cafdab4f5e0a5ae6744ec"} Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.910652 4664 scope.go:117] "RemoveContainer" containerID="b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.910770 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.918150 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"26880f4a-2c23-4107-86b6-937a82c2fcb1","Type":"ContainerStarted","Data":"82cb0eb1adce371dd45439c6a564756fd0c235dec1fa415ffb64e75869d640fb"} Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.918191 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.919009 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.919030 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.919039 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.990331 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:52 crc kubenswrapper[4664]: I1013 07:04:52.997665 4664 scope.go:117] "RemoveContainer" containerID="24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.000826 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.011195 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:53 crc kubenswrapper[4664]: E1013 07:04:53.011640 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="probe" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.011664 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="probe" Oct 13 07:04:53 crc kubenswrapper[4664]: E1013 07:04:53.011723 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="cinder-scheduler" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.011732 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="cinder-scheduler" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.011951 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="cinder-scheduler" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.011980 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" containerName="probe" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.013010 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.022393 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.044106 4664 scope.go:117] "RemoveContainer" containerID="b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46" Oct 13 07:04:53 crc kubenswrapper[4664]: E1013 07:04:53.056011 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46\": container with ID starting with b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46 not found: ID does not exist" containerID="b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.056492 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46"} err="failed to get container status \"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46\": rpc error: code = NotFound desc = could not find container \"b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46\": container with ID starting with b81b1821e9488838204e775af461c88ef2376d630458d7ad6ce05c2a092bcc46 not found: ID does not exist" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.057922 4664 scope.go:117] "RemoveContainer" containerID="24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a" Oct 13 07:04:53 crc kubenswrapper[4664]: E1013 07:04:53.061348 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a\": container with ID starting with 24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a not found: ID does not exist" containerID="24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.063073 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a"} err="failed to get container status \"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a\": rpc error: code = NotFound desc = could not find container \"24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a\": container with ID starting with 24cb8f57a15c07f91f205206041e87fb90b685d17547fd27915fd303b50adc4a not found: ID does not exist" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.063227 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4507fc8-d71b-40bd-bc28-e21d538fc081" path="/var/lib/kubelet/pods/f4507fc8-d71b-40bd-bc28-e21d538fc081/volumes" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.063766 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.100002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x285s\" (UniqueName: \"kubernetes.io/projected/fd97ddf9-5f06-4c91-af04-42c116fac89d-kube-api-access-x285s\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: 
I1013 07:04:53.100075 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd97ddf9-5f06-4c91-af04-42c116fac89d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.100109 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.100162 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-scripts\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.100198 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.100221 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.202729 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x285s\" (UniqueName: \"kubernetes.io/projected/fd97ddf9-5f06-4c91-af04-42c116fac89d-kube-api-access-x285s\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.202830 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd97ddf9-5f06-4c91-af04-42c116fac89d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.203118 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.203190 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-scripts\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.203242 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.203271 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.205510 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd97ddf9-5f06-4c91-af04-42c116fac89d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.218267 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.227571 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-scripts\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.228387 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.234216 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd97ddf9-5f06-4c91-af04-42c116fac89d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.245543 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x285s\" (UniqueName: \"kubernetes.io/projected/fd97ddf9-5f06-4c91-af04-42c116fac89d-kube-api-access-x285s\") pod \"cinder-scheduler-0\" (UID: \"fd97ddf9-5f06-4c91-af04-42c116fac89d\") " pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.341505 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.876596 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 13 07:04:53 crc kubenswrapper[4664]: I1013 07:04:53.968835 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fd97ddf9-5f06-4c91-af04-42c116fac89d","Type":"ContainerStarted","Data":"7a81567bfa841a0d813795fd2abdbe5ba2349585b912cf4d75a37a471dc11cb9"} Oct 13 07:04:54 crc kubenswrapper[4664]: I1013 07:04:54.985727 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:04:54 crc kubenswrapper[4664]: I1013 07:04:54.985992 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:04:54 crc kubenswrapper[4664]: I1013 07:04:54.985933 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:04:54 crc kubenswrapper[4664]: I1013 07:04:54.986774 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:04:54 crc kubenswrapper[4664]: I1013 07:04:54.985872 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fd97ddf9-5f06-4c91-af04-42c116fac89d","Type":"ContainerStarted","Data":"4a5f1ab1d2b02066b676a5f2f840925d5c727dd05eb8983b65bb0867c965af73"} Oct 13 07:04:55 crc kubenswrapper[4664]: I1013 07:04:55.023586 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:04:55 crc kubenswrapper[4664]: I1013 07:04:55.024658 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6d67b46cc4-4zwj2" podUID="a15b08ef-9c9a-44e9-bf8b-a6fa7c6238b5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: i/o timeout (Client.Timeout exceeded while awaiting headers)" Oct 13 07:04:55 crc kubenswrapper[4664]: I1013 07:04:55.995037 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fd97ddf9-5f06-4c91-af04-42c116fac89d","Type":"ContainerStarted","Data":"0debfe0fa94080800bc588fe118bb56d6eeb733f4cdbff43040e0a69b53e8136"} Oct 13 07:04:56 crc kubenswrapper[4664]: I1013 07:04:56.023379 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.02336484 podStartE2EDuration="4.02336484s" podCreationTimestamp="2025-10-13 07:04:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:04:56.01850811 +0000 UTC m=+1103.705953302" watchObservedRunningTime="2025-10-13 07:04:56.02336484 +0000 UTC m=+1103.710810032" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.030679 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.692424 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.692836 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 
07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.735067 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.827944 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.828035 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:04:57 crc kubenswrapper[4664]: I1013 07:04:57.828889 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 13 07:04:58 crc kubenswrapper[4664]: I1013 07:04:58.342126 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.756535 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"] Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.765044 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.770955 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.771153 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.771285 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-vc7gh" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.802848 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"] Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.839718 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.839805 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg2jx\" (UniqueName: \"kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.839866 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.839931 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: 
I1013 07:04:59.859184 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"] Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.879912 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"] Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.880199 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.884323 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941748 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg2jx\" (UniqueName: \"kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941828 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941861 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941887 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941913 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kkkb\" (UniqueName: \"kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941957 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.941989 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.942010 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.958222 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.974578 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.975433 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.997285 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"] Oct 13 07:04:59 crc kubenswrapper[4664]: I1013 07:04:59.999082 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.013543 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg2jx\" (UniqueName: \"kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx\") pod \"heat-engine-6657f6fcdb-mmtx6\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.022169 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"] Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.043732 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.043779 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.043832 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kkkb\" (UniqueName: \"kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:05:00 crc 
kubenswrapper[4664]: I1013 07:05:00.043875 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.066861 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.086240 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5758f76974-8cm64"]
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.087789 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.089322 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6657f6fcdb-mmtx6"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.094637 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.096883 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.099400 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5758f76974-8cm64"]
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.102578 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.133634 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kkkb\" (UniqueName: \"kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb\") pod \"heat-cfnapi-6f7d4d9596-4hkft\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147545 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147596 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147633 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147670 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147699 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5n8l\" (UniqueName: \"kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147745 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147763 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147833 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56tw9\" (UniqueName: \"kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147855 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.147908 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.202594 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252071 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5n8l\" (UniqueName: \"kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252126 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252147 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252204 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56tw9\" (UniqueName: \"kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252224 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252275 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252298 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252328 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252354 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.252386 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.255125 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.259242 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.259544 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.260553 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.266173 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.272721 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.277302 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.278878 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.287242 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5n8l\" (UniqueName: \"kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l\") pod \"heat-api-5758f76974-8cm64\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.328911 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56tw9\" (UniqueName: \"kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9\") pod \"dnsmasq-dns-864486cfc5-dvcpd\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.546491 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.556598 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.793871 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"]
Oct 13 07:05:00 crc kubenswrapper[4664]: W1013 07:05:00.824376 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde4b5d93_3b16_4103_8688_3365fc6302a8.slice/crio-99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3 WatchSource:0}: Error finding container 99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3: Status 404 returned error can't find the container with id 99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3
Oct 13 07:05:00 crc kubenswrapper[4664]: I1013 07:05:00.922040 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"]
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.108478 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6657f6fcdb-mmtx6" event={"ID":"de4b5d93-3b16-4103-8688-3365fc6302a8","Type":"ContainerStarted","Data":"99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3"}
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.110373 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" event={"ID":"19797988-adf9-40b6-8e07-f2e0869d3db6","Type":"ContainerStarted","Data":"6f61f1dbe0ff42a3082d90102dcb4ea2251f5c06da2f4ea58237ffb852d30b7b"}
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.253452 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"]
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.387025 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5758f76974-8cm64"]
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.743933 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7445f97b5f-k8zxs"]
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.745439 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.751138 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.751445 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.751558 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.764420 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7445f97b5f-k8zxs"]
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.791918 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-log-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.791972 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-etc-swift\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.791997 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr2nv\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-kube-api-access-cr2nv\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.792076 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-internal-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.792096 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-config-data\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.792121 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-combined-ca-bundle\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.792140 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-run-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.792157 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-public-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.894334 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-log-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895268 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-etc-swift\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895356 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr2nv\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-kube-api-access-cr2nv\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895457 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-internal-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895528 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-config-data\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895676 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-combined-ca-bundle\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895749 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-run-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.895837 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-public-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.894873 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-log-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.898225 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4132324-29a1-4d67-91a2-9b7ec6a7c960-run-httpd\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.902606 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-public-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.903501 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-internal-tls-certs\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.909695 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-combined-ca-bundle\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.910327 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-etc-swift\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.910381 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4132324-29a1-4d67-91a2-9b7ec6a7c960-config-data\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:01 crc kubenswrapper[4664]: I1013 07:05:01.924865 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr2nv\" (UniqueName: \"kubernetes.io/projected/b4132324-29a1-4d67-91a2-9b7ec6a7c960-kube-api-access-cr2nv\") pod \"swift-proxy-7445f97b5f-k8zxs\" (UID: \"b4132324-29a1-4d67-91a2-9b7ec6a7c960\") " pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.077242 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7445f97b5f-k8zxs"
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.158859 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5758f76974-8cm64" event={"ID":"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723","Type":"ContainerStarted","Data":"303261dd0260b601d87c3e9aed0c2b1e7aeb2e16082f3242816e62e00ee66ce4"}
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.178006 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6657f6fcdb-mmtx6" event={"ID":"de4b5d93-3b16-4103-8688-3365fc6302a8","Type":"ContainerStarted","Data":"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6"}
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.178100 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6657f6fcdb-mmtx6"
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.187485 4664 generic.go:334] "Generic (PLEG): container finished" podID="99998d93-3c3d-4269-96b4-260b38f59814" containerID="104777c2a029e611defe288496f6f0ee2ce93c302888d0828c5589e0589523ec" exitCode=0
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.187528 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" event={"ID":"99998d93-3c3d-4269-96b4-260b38f59814","Type":"ContainerDied","Data":"104777c2a029e611defe288496f6f0ee2ce93c302888d0828c5589e0589523ec"}
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.187558 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" event={"ID":"99998d93-3c3d-4269-96b4-260b38f59814","Type":"ContainerStarted","Data":"53f862947320f9c575b1dbc672472ee5965db5e4150919d5365672258c93d324"}
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.198343 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6657f6fcdb-mmtx6" podStartSLOduration=3.1983311739999998 podStartE2EDuration="3.198331174s" podCreationTimestamp="2025-10-13 07:04:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:02.196638648 +0000 UTC m=+1109.884083840" watchObservedRunningTime="2025-10-13 07:05:02.198331174 +0000 UTC m=+1109.885776366"
Oct 13 07:05:02 crc kubenswrapper[4664]: I1013 07:05:02.798835 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7445f97b5f-k8zxs"]
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.217485 4664 generic.go:334] "Generic (PLEG): container finished" podID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerID="3e98e310e9273a988cb52efff2ff3e2fde492c3c614dc7f1aae5eacb75813db4" exitCode=137
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.217611 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerDied","Data":"3e98e310e9273a988cb52efff2ff3e2fde492c3c614dc7f1aae5eacb75813db4"}
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.230610 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" event={"ID":"99998d93-3c3d-4269-96b4-260b38f59814","Type":"ContainerStarted","Data":"7120fe422ce19c2f83735f21bb721fb0dc8fc471ff51697fa259180ba2fe25e1"}
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.230709 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd"
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.260372 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" podStartSLOduration=4.260356026 podStartE2EDuration="4.260356026s" podCreationTimestamp="2025-10-13 07:04:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:03.2545719 +0000 UTC m=+1110.942017102" watchObservedRunningTime="2025-10-13 07:05:03.260356026 +0000 UTC m=+1110.947801218"
Oct 13 07:05:03 crc kubenswrapper[4664]: W1013 07:05:03.655366 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4132324_29a1_4d67_91a2_9b7ec6a7c960.slice/crio-a272cf2c098c0fa392638f5c34394b90d38aeaefe68a0b355279de7e30a6a49b WatchSource:0}: Error finding container a272cf2c098c0fa392638f5c34394b90d38aeaefe68a0b355279de7e30a6a49b: Status 404 returned error can't find the container with id a272cf2c098c0fa392638f5c34394b90d38aeaefe68a0b355279de7e30a6a49b
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.706045 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.946475 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.946688 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-log" containerID="cri-o://ff665f3fc9ab4ed4b95c7d9e6e27533265c4c868ec292889fd035852a37009a4" gracePeriod=30
Oct 13 07:05:03 crc kubenswrapper[4664]: I1013 07:05:03.947120 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-httpd" containerID="cri-o://0057dffe25092120226f3568a6fa3979743a1a3670deb37d0c927a082a6587f7" gracePeriod=30
Oct 13 07:05:04 crc kubenswrapper[4664]: I1013 07:05:04.240490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7445f97b5f-k8zxs" event={"ID":"b4132324-29a1-4d67-91a2-9b7ec6a7c960","Type":"ContainerStarted","Data":"a272cf2c098c0fa392638f5c34394b90d38aeaefe68a0b355279de7e30a6a49b"}
Oct 13 07:05:04 crc kubenswrapper[4664]: I1013 07:05:04.245315 4664 generic.go:334] "Generic (PLEG): container finished" podID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerID="ff665f3fc9ab4ed4b95c7d9e6e27533265c4c868ec292889fd035852a37009a4" exitCode=143
Oct 13 07:05:04 crc kubenswrapper[4664]: I1013 07:05:04.245393 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerDied","Data":"ff665f3fc9ab4ed4b95c7d9e6e27533265c4c868ec292889fd035852a37009a4"}
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.489015 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-tdsgr"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.490651 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-tdsgr"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.510649 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7hlb\" (UniqueName: \"kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb\") pod \"nova-api-db-create-tdsgr\" (UID: \"417f8a42-5f63-4637-9644-ebb89537f1be\") " pod="openstack/nova-api-db-create-tdsgr"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.514146 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-tdsgr"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.612653 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7hlb\" (UniqueName: \"kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb\") pod \"nova-api-db-create-tdsgr\" (UID: \"417f8a42-5f63-4637-9644-ebb89537f1be\") " pod="openstack/nova-api-db-create-tdsgr"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.638657 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7hlb\" (UniqueName: \"kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb\") pod \"nova-api-db-create-tdsgr\" (UID: \"417f8a42-5f63-4637-9644-ebb89537f1be\") " pod="openstack/nova-api-db-create-tdsgr"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.689192 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-dzd5z"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.693902 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dzd5z"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.713921 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrnjl\" (UniqueName: \"kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl\") pod \"nova-cell0-db-create-dzd5z\" (UID: \"20839d87-6d9f-4137-a447-9da5d1523e9c\") " pod="openstack/nova-cell0-db-create-dzd5z"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.720509 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dzd5z"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.793975 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-jw2p6"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.795220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jw2p6"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.806364 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-jw2p6"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.807991 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-tdsgr"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.816511 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrnjl\" (UniqueName: \"kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl\") pod \"nova-cell0-db-create-dzd5z\" (UID: \"20839d87-6d9f-4137-a447-9da5d1523e9c\") " pod="openstack/nova-cell0-db-create-dzd5z"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.816659 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cf9j\" (UniqueName: \"kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j\") pod \"nova-cell1-db-create-jw2p6\" (UID: \"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e\") " pod="openstack/nova-cell1-db-create-jw2p6"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.842337 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrnjl\" (UniqueName: \"kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl\") pod \"nova-cell0-db-create-dzd5z\" (UID: \"20839d87-6d9f-4137-a447-9da5d1523e9c\") " pod="openstack/nova-cell0-db-create-dzd5z"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.895738 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-9c86db6bb-5dfhm"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.897325 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.903995 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-9c86db6bb-5dfhm"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.908770 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.910459 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.919329 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data-custom\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.919515 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rtk8\" (UniqueName: \"kubernetes.io/projected/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-kube-api-access-6rtk8\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.919610 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-combined-ca-bundle\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.919742 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.929315 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cf9j\" (UniqueName: \"kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j\") pod \"nova-cell1-db-create-jw2p6\" (UID: \"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e\") " pod="openstack/nova-cell1-db-create-jw2p6"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.941480 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"]
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.943248 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.956515 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cf9j\" (UniqueName: \"kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j\") pod \"nova-cell1-db-create-jw2p6\" (UID: \"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e\") " pod="openstack/nova-cell1-db-create-jw2p6"
Oct 13 07:05:06 crc kubenswrapper[4664]: I1013 07:05:06.978888 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.003379 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"]
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.018393 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dzd5z"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.031779 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.031835 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.031853 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.031888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pqpz\" (UniqueName: \"kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.031975 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data-custom\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032003 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032019 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032040 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rtk8\" (UniqueName: \"kubernetes.io/projected/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-kube-api-access-6rtk8\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032058 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032076 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-combined-ca-bundle\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032100 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxjz9\" (UniqueName: \"kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.032143 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.050492 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.061750 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-combined-ca-bundle\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.063260 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rtk8\" (UniqueName: \"kubernetes.io/projected/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-kube-api-access-6rtk8\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.065262 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d2d2de6d-b7be-4b17-a73c-33d7dacf228c-config-data-custom\") pod \"heat-engine-9c86db6bb-5dfhm\" (UID: \"d2d2de6d-b7be-4b17-a73c-33d7dacf228c\") " pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.114841 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jw2p6"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134476 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134519 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134552 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134581 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxjz9\" (UniqueName: \"kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134632 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134670 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134687 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.134725 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pqpz\" (UniqueName: \"kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.139592 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.145624 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.146141 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.147393 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.148517 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.154157 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.155353 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pqpz\" (UniqueName: \"kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz\") pod \"heat-api-9df6477f-n8297\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") " pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.159203 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxjz9\" (UniqueName: \"kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9\") pod \"heat-cfnapi-754ccf95b4-8cxkq\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") " pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.232267 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.278974 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.279932 4664 generic.go:334] "Generic (PLEG): container finished" podID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerID="0057dffe25092120226f3568a6fa3979743a1a3670deb37d0c927a082a6587f7" exitCode=0
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.279963 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerDied","Data":"0057dffe25092120226f3568a6fa3979743a1a3670deb37d0c927a082a6587f7"}
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.321031 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.600163 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.600400 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-log" containerID="cri-o://855fa1e1e273a560a58855113698758f8ae69c8f0fd44854faa09d3258c67c51" gracePeriod=30
Oct 13 07:05:07 crc kubenswrapper[4664]: I1013 07:05:07.600437 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-httpd" containerID="cri-o://764ed53a91604c3eb760ad3034214b347899c557047289adf49dfd661cd1e6f2" gracePeriod=30
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.293324 4664 generic.go:334] "Generic (PLEG): container finished" podID="ff491711-d62f-403e-8e23-3fa1d4137432" containerID="855fa1e1e273a560a58855113698758f8ae69c8f0fd44854faa09d3258c67c51" exitCode=143
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.293366 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerDied","Data":"855fa1e1e273a560a58855113698758f8ae69c8f0fd44854faa09d3258c67c51"}
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.653541 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5758f76974-8cm64"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.667514 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.688783 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-66b9b87ff8-xsb2c"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.690545 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.696633 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.696803 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.717449 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66b9b87ff8-xsb2c"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.729425 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-564db64b9c-vkx7h"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.733995 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.736666 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.736857 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765021 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data-custom\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765103 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htpph\" (UniqueName: \"kubernetes.io/projected/23693d9c-d47d-406d-864d-fa1c94b8f381-kube-api-access-htpph\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765130 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-combined-ca-bundle\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765155 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765175 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765194 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-public-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765211 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-internal-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765234 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-internal-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765248 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-public-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765262 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr5r8\" (UniqueName: \"kubernetes.io/projected/10a9562e-4b1b-4183-bb7a-c3f934109c1d-kube-api-access-nr5r8\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765306 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-combined-ca-bundle\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.765330 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data-custom\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.776089 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-564db64b9c-vkx7h"]
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868234 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868293 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868330 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-public-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868358 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-internal-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868393 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-internal-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868415 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr5r8\" (UniqueName: \"kubernetes.io/projected/10a9562e-4b1b-4183-bb7a-c3f934109c1d-kube-api-access-nr5r8\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868434 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-public-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868500 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-combined-ca-bundle\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868535 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data-custom\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868579 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data-custom\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868664 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htpph\" (UniqueName: \"kubernetes.io/projected/23693d9c-d47d-406d-864d-fa1c94b8f381-kube-api-access-htpph\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.868696 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-combined-ca-bundle\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.875579 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-internal-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.876346 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.876779 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-config-data-custom\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.876903 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-public-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.877415 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-combined-ca-bundle\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.880518 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.881073 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-internal-tls-certs\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.893308 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr5r8\" (UniqueName: \"kubernetes.io/projected/10a9562e-4b1b-4183-bb7a-c3f934109c1d-kube-api-access-nr5r8\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.896482 4664
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-combined-ca-bundle\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.897476 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/10a9562e-4b1b-4183-bb7a-c3f934109c1d-config-data-custom\") pod \"heat-cfnapi-564db64b9c-vkx7h\" (UID: \"10a9562e-4b1b-4183-bb7a-c3f934109c1d\") " pod="openstack/heat-cfnapi-564db64b9c-vkx7h" Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.902582 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23693d9c-d47d-406d-864d-fa1c94b8f381-public-tls-certs\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:08 crc kubenswrapper[4664]: I1013 07:05:08.902759 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htpph\" (UniqueName: \"kubernetes.io/projected/23693d9c-d47d-406d-864d-fa1c94b8f381-kube-api-access-htpph\") pod \"heat-api-66b9b87ff8-xsb2c\" (UID: \"23693d9c-d47d-406d-864d-fa1c94b8f381\") " pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:09 crc kubenswrapper[4664]: I1013 07:05:09.016575 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:09 crc kubenswrapper[4664]: I1013 07:05:09.072896 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-564db64b9c-vkx7h" Oct 13 07:05:10 crc kubenswrapper[4664]: I1013 07:05:10.549077 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" Oct 13 07:05:10 crc kubenswrapper[4664]: I1013 07:05:10.636970 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:05:10 crc kubenswrapper[4664]: I1013 07:05:10.637190 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="dnsmasq-dns" containerID="cri-o://df54618c28d4adf7d70187369bec6d5e5cf73b1758c53ca0c2b4938ffc18a235" gracePeriod=10 Oct 13 07:05:11 crc kubenswrapper[4664]: I1013 07:05:11.339393 4664 generic.go:334] "Generic (PLEG): container finished" podID="ff491711-d62f-403e-8e23-3fa1d4137432" containerID="764ed53a91604c3eb760ad3034214b347899c557047289adf49dfd661cd1e6f2" exitCode=0 Oct 13 07:05:11 crc kubenswrapper[4664]: I1013 07:05:11.339424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerDied","Data":"764ed53a91604c3eb760ad3034214b347899c557047289adf49dfd661cd1e6f2"} Oct 13 07:05:11 crc kubenswrapper[4664]: I1013 07:05:11.345593 4664 generic.go:334] "Generic (PLEG): container finished" podID="f450f736-0236-4e48-b810-e09e43815bc9" containerID="df54618c28d4adf7d70187369bec6d5e5cf73b1758c53ca0c2b4938ffc18a235" exitCode=0 Oct 13 07:05:11 crc kubenswrapper[4664]: I1013 07:05:11.345633 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" 
event={"ID":"f450f736-0236-4e48-b810-e09e43815bc9","Type":"ContainerDied","Data":"df54618c28d4adf7d70187369bec6d5e5cf73b1758c53ca0c2b4938ffc18a235"} Oct 13 07:05:12 crc kubenswrapper[4664]: I1013 07:05:12.405851 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.171:9292/healthcheck\": dial tcp 10.217.0.171:9292: connect: connection refused" Oct 13 07:05:12 crc kubenswrapper[4664]: I1013 07:05:12.405896 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.171:9292/healthcheck\": dial tcp 10.217.0.171:9292: connect: connection refused" Oct 13 07:05:12 crc kubenswrapper[4664]: I1013 07:05:12.603131 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.170:9292/healthcheck\": dial tcp 10.217.0.170:9292: connect: connection refused" Oct 13 07:05:12 crc kubenswrapper[4664]: I1013 07:05:12.603132 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.170:9292/healthcheck\": dial tcp 10.217.0.170:9292: connect: connection refused" Oct 13 07:05:12 crc kubenswrapper[4664]: I1013 07:05:12.979830 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.071944 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072119 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072188 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072312 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072408 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: 
\"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072432 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.072465 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqjvj\" (UniqueName: \"kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj\") pod \"da4413cf-00ec-4092-a86a-be0874b30c2c\" (UID: \"da4413cf-00ec-4092-a86a-be0874b30c2c\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.073458 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.077425 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.089863 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts" (OuterVolumeSpecName: "scripts") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.097888 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj" (OuterVolumeSpecName: "kube-api-access-wqjvj") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "kube-api-access-wqjvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.135062 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.191774 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.191829 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.191840 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.191851 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqjvj\" (UniqueName: \"kubernetes.io/projected/da4413cf-00ec-4092-a86a-be0874b30c2c-kube-api-access-wqjvj\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.191863 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da4413cf-00ec-4092-a86a-be0874b30c2c-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.247981 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.278365 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data" (OuterVolumeSpecName: "config-data") pod "da4413cf-00ec-4092-a86a-be0874b30c2c" (UID: "da4413cf-00ec-4092-a86a-be0874b30c2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.295119 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.295146 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da4413cf-00ec-4092-a86a-be0874b30c2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.399458 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da4413cf-00ec-4092-a86a-be0874b30c2c","Type":"ContainerDied","Data":"143f1d056ee23413e45733d1d0364d11485b942f83d8739d5a6b0a5d248c22f5"} Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.399501 4664 scope.go:117] "RemoveContainer" containerID="3e98e310e9273a988cb52efff2ff3e2fde492c3c614dc7f1aae5eacb75813db4" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.399608 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.498118 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.523094 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.553641 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:13 crc kubenswrapper[4664]: E1013 07:05:13.554112 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="ceilometer-notification-agent" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554125 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="ceilometer-notification-agent" Oct 13 07:05:13 crc kubenswrapper[4664]: E1013 07:05:13.554143 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="sg-core" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554149 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="sg-core" Oct 13 07:05:13 crc kubenswrapper[4664]: E1013 07:05:13.554177 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="proxy-httpd" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554184 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="proxy-httpd" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554347 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="sg-core" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554366 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="ceilometer-notification-agent" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.554375 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" containerName="proxy-httpd" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.557667 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.563851 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.570112 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.570214 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600321 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m67mb\" (UniqueName: \"kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600386 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600452 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600480 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600601 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600671 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.600701 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.684090 4664 scope.go:117] "RemoveContainer" containerID="51601bdca4f7d778209c81f850c87649d1cb6bfc7959dddcc21d5b46b7c5c604" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.684309 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.689402 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701002 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701052 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701101 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701148 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701165 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701182 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701204 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701224 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g68jh\" (UniqueName: \"kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701242 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701261 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb\") pod \"f450f736-0236-4e48-b810-e09e43815bc9\" (UID: \"f450f736-0236-4e48-b810-e09e43815bc9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701304 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701322 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701350 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctp88\" (UniqueName: \"kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88\") pod \"fed0bdbf-7ada-4393-972d-e29465a603f9\" (UID: \"fed0bdbf-7ada-4393-972d-e29465a603f9\") " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701474 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701520 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701555 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701604 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m67mb\" (UniqueName: \"kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701622 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701657 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.701677 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.704811 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.714001 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.714398 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.727072 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs" (OuterVolumeSpecName: "logs") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.760269 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.760661 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.767828 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts" (OuterVolumeSpecName: "scripts") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.782583 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.783067 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh" (OuterVolumeSpecName: "kube-api-access-g68jh") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "kube-api-access-g68jh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.783124 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.804758 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.804974 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g68jh\" (UniqueName: \"kubernetes.io/projected/f450f736-0236-4e48-b810-e09e43815bc9-kube-api-access-g68jh\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.805081 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fed0bdbf-7ada-4393-972d-e29465a603f9-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.805180 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.805256 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.807937 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.808301 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.808775 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m67mb\" (UniqueName: \"kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb\") pod \"ceilometer-0\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.810363 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88" (OuterVolumeSpecName: "kube-api-access-ctp88") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "kube-api-access-ctp88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.862424 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.894228 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.907018 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.907049 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctp88\" (UniqueName: \"kubernetes.io/projected/fed0bdbf-7ada-4393-972d-e29465a603f9-kube-api-access-ctp88\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.907060 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.907070 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.955878 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:13 crc kubenswrapper[4664]: I1013 07:05:13.956056 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.010863 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.021813 4664 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.021850 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.088389 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.125042 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.134259 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.172352 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config" (OuterVolumeSpecName: "config") pod "f450f736-0236-4e48-b810-e09e43815bc9" (UID: "f450f736-0236-4e48-b810-e09e43815bc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.215947 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data" (OuterVolumeSpecName: "config-data") pod "fed0bdbf-7ada-4393-972d-e29465a603f9" (UID: "fed0bdbf-7ada-4393-972d-e29465a603f9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.227033 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.227055 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f450f736-0236-4e48-b810-e09e43815bc9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.227063 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fed0bdbf-7ada-4393-972d-e29465a603f9-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.340732 4664 scope.go:117] "RemoveContainer" containerID="0bad5cc6c81cb37888da1bd0712f04802297fd0a68a2dfb49c78826e80a749b2" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.427703 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7445f97b5f-k8zxs" event={"ID":"b4132324-29a1-4d67-91a2-9b7ec6a7c960","Type":"ContainerStarted","Data":"6e2017da0ecac4cd85753d8503c87b3f0f6ea9ace9b0b8219f55313c5c65aecc"} Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.432264 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" event={"ID":"f450f736-0236-4e48-b810-e09e43815bc9","Type":"ContainerDied","Data":"80a5040c576a65a37ad4a9bc641037ae53716298957e5eceb7d3ace5b95d338c"} Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.432338 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c55fd88c-shnsp" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.439037 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fed0bdbf-7ada-4393-972d-e29465a603f9","Type":"ContainerDied","Data":"cc7d0b814a543ba991cb3017f52a08b0f0b680ef033863b3be27cd4f8347394d"} Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.439835 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.531575 4664 scope.go:117] "RemoveContainer" containerID="df54618c28d4adf7d70187369bec6d5e5cf73b1758c53ca0c2b4938ffc18a235" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.564977 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.641243 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56c55fd88c-shnsp"] Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.714205 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.721173 4664 scope.go:117] "RemoveContainer" containerID="150e68804f6b155dc19a6939cc5d754ecb3ee9ea75baf72a5677cb35915e9dc8" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.787086 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.795858 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:05:14 crc kubenswrapper[4664]: E1013 07:05:14.796311 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="init" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796324 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="init" Oct 13 07:05:14 crc kubenswrapper[4664]: E1013 07:05:14.796347 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="dnsmasq-dns" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796353 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="dnsmasq-dns" Oct 13 07:05:14 crc kubenswrapper[4664]: E1013 07:05:14.796363 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-httpd" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796369 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-httpd" Oct 13 07:05:14 crc kubenswrapper[4664]: E1013 07:05:14.796378 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-log" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796384 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-log" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796556 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-httpd" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796566 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f450f736-0236-4e48-b810-e09e43815bc9" containerName="dnsmasq-dns" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.796577 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" containerName="glance-log" Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.797527 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.807385 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.807584 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.821399 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.916368 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-564db64b9c-vkx7h"]
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950338 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950397 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950440 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950518 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950553 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950605 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn26k\" (UniqueName: \"kubernetes.io/projected/97b5bee2-4876-4d80-97cf-53c8d91521d9-kube-api-access-fn26k\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950642 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.950691 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-logs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:14 crc kubenswrapper[4664]: I1013 07:05:14.961959 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-tdsgr"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.039664 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66b9b87ff8-xsb2c"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052506 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052570 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052615 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn26k\" (UniqueName: \"kubernetes.io/projected/97b5bee2-4876-4d80-97cf-53c8d91521d9-kube-api-access-fn26k\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052655 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052683 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-logs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052729 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052749 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.052773 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.053354 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.053698 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.069841 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.070259 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97b5bee2-4876-4d80-97cf-53c8d91521d9-logs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.078555 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.100871 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da4413cf-00ec-4092-a86a-be0874b30c2c" path="/var/lib/kubelet/pods/da4413cf-00ec-4092-a86a-be0874b30c2c/volumes"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.101539 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f450f736-0236-4e48-b810-e09e43815bc9" path="/var/lib/kubelet/pods/f450f736-0236-4e48-b810-e09e43815bc9/volumes"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.102167 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fed0bdbf-7ada-4393-972d-e29465a603f9" path="/var/lib/kubelet/pods/fed0bdbf-7ada-4393-972d-e29465a603f9/volumes"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.104954 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.114386 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97b5bee2-4876-4d80-97cf-53c8d91521d9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.119816 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn26k\" (UniqueName: \"kubernetes.io/projected/97b5bee2-4876-4d80-97cf-53c8d91521d9-kube-api-access-fn26k\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.235453 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"97b5bee2-4876-4d80-97cf-53c8d91521d9\") " pod="openstack/glance-default-internal-api-0"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.304859 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.363346 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-jw2p6"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.372904 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dzd5z"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.384857 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.411690 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 13 07:05:15 crc kubenswrapper[4664]: W1013 07:05:15.435766 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6136e7f8_cad7_44a1_b237_70f4e309366c.slice/crio-82a27cca59003fdd0b42be0848a0327b33bef9614ac89d1470655d26432afd20 WatchSource:0}: Error finding container 82a27cca59003fdd0b42be0848a0327b33bef9614ac89d1470655d26432afd20: Status 404 returned error can't find the container with id 82a27cca59003fdd0b42be0848a0327b33bef9614ac89d1470655d26432afd20
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.468575 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"26880f4a-2c23-4107-86b6-937a82c2fcb1","Type":"ContainerStarted","Data":"753c535df84875a5c4f445f3b4408f4740158a86b06c53f4dd7559b4d9f58731"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.485828 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-9c86db6bb-5dfhm"]
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.487227 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.645571193 podStartE2EDuration="24.48721692s" podCreationTimestamp="2025-10-13 07:04:51 +0000 UTC" firstStartedPulling="2025-10-13 07:04:52.38375154 +0000 UTC m=+1100.071196732" lastFinishedPulling="2025-10-13 07:05:13.225397267 +0000 UTC m=+1120.912842459" observedRunningTime="2025-10-13 07:05:15.48457861 +0000 UTC m=+1123.172023802" watchObservedRunningTime="2025-10-13 07:05:15.48721692 +0000 UTC m=+1123.174662112"
Oct 13 07:05:15 crc kubenswrapper[4664]: W1013 07:05:15.490959 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod869b5463_3457_4467_b860_33f66c30e334.slice/crio-a2d1650f8bdc2bc56d784d99946476e9bbfb03bee8d96a79b97b88e36335666c WatchSource:0}: Error finding container a2d1650f8bdc2bc56d784d99946476e9bbfb03bee8d96a79b97b88e36335666c: Status 404 returned error can't find the container with id a2d1650f8bdc2bc56d784d99946476e9bbfb03bee8d96a79b97b88e36335666c
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.507691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9df6477f-n8297" event={"ID":"6136e7f8-cad7-44a1-b237-70f4e309366c","Type":"ContainerStarted","Data":"82a27cca59003fdd0b42be0848a0327b33bef9614ac89d1470655d26432afd20"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.510866 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-564db64b9c-vkx7h" event={"ID":"10a9562e-4b1b-4183-bb7a-c3f934109c1d","Type":"ContainerStarted","Data":"87e21c7c55bd92ba277cd9aa2e5cd7a52eb56158123fdd7c48c4f219ebe8251c"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.513004 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" event={"ID":"19797988-adf9-40b6-8e07-f2e0869d3db6","Type":"ContainerStarted","Data":"fe68a7c4dc50ab4c938db733ab5b5b0842178279c84c675970150a2888ceb9c8"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.513109 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" podUID="19797988-adf9-40b6-8e07-f2e0869d3db6" containerName="heat-cfnapi" containerID="cri-o://fe68a7c4dc50ab4c938db733ab5b5b0842178279c84c675970150a2888ceb9c8" gracePeriod=60
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.513343 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.526069 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ff491711-d62f-403e-8e23-3fa1d4137432","Type":"ContainerDied","Data":"10fb23438a565e9f20f57a2cfcf175423c3fce26f4ae99148247308904e48bff"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.526112 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10fb23438a565e9f20f57a2cfcf175423c3fce26f4ae99148247308904e48bff"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.532240 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5758f76974-8cm64" event={"ID":"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723","Type":"ContainerStarted","Data":"eed63fb43c233bd43921992b315b134bb8656b2500af465e4d43e6791aeb4586"}
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.532419 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5758f76974-8cm64"
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.532429 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5758f76974-8cm64" podUID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" containerName="heat-api" containerID="cri-o://eed63fb43c233bd43921992b315b134bb8656b2500af465e4d43e6791aeb4586" gracePeriod=60
Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.545004 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b9b87ff8-xsb2c" event={"ID":"23693d9c-d47d-406d-864d-fa1c94b8f381","Type":"ContainerStarted","Data":"feffafc4e8fd1b603ecd5afaabc6c42ee3d14dd3a8a0a47e257d3e4d6024cee2"}
event={"ID":"23693d9c-d47d-406d-864d-fa1c94b8f381","Type":"ContainerStarted","Data":"feffafc4e8fd1b603ecd5afaabc6c42ee3d14dd3a8a0a47e257d3e4d6024cee2"} Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.550096 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-tdsgr" event={"ID":"417f8a42-5f63-4637-9644-ebb89537f1be","Type":"ContainerStarted","Data":"a83e01897584b3ebc55f3f071d360db7f756f332a0409dde17e374074767ade1"} Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.561357 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.584832 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5758f76974-8cm64" podStartSLOduration=4.807861775 podStartE2EDuration="16.584776796s" podCreationTimestamp="2025-10-13 07:04:59 +0000 UTC" firstStartedPulling="2025-10-13 07:05:01.42336921 +0000 UTC m=+1109.110814402" lastFinishedPulling="2025-10-13 07:05:13.200284231 +0000 UTC m=+1120.887729423" observedRunningTime="2025-10-13 07:05:15.566043083 +0000 UTC m=+1123.253488275" watchObservedRunningTime="2025-10-13 07:05:15.584776796 +0000 UTC m=+1123.272221988" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.586834 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jw2p6" event={"ID":"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e","Type":"ContainerStarted","Data":"52c088932f983e2c8fe4fcf31e3aac94c4ec99d48c62f4cf1d669165f533b862"} Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.586933 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" podStartSLOduration=4.594541394 podStartE2EDuration="16.586922885s" podCreationTimestamp="2025-10-13 07:04:59 +0000 UTC" firstStartedPulling="2025-10-13 07:05:00.92450283 +0000 UTC m=+1108.611948022" lastFinishedPulling="2025-10-13 07:05:12.916884321 +0000 UTC m=+1120.604329513" observedRunningTime="2025-10-13 07:05:15.544563754 +0000 UTC m=+1123.232008936" watchObservedRunningTime="2025-10-13 07:05:15.586922885 +0000 UTC m=+1123.274368087" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.591130 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.639718 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7445f97b5f-k8zxs" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.639758 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7445f97b5f-k8zxs" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674566 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674679 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62chj\" (UniqueName: \"kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674699 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674781 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674860 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674921 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.674977 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.675023 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run\") pod \"ff491711-d62f-403e-8e23-3fa1d4137432\" (UID: \"ff491711-d62f-403e-8e23-3fa1d4137432\") " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.677741 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs" (OuterVolumeSpecName: "logs") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.678377 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.686286 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7445f97b5f-k8zxs" podStartSLOduration=14.686250779 podStartE2EDuration="14.686250779s" podCreationTimestamp="2025-10-13 07:05:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:15.674307937 +0000 UTC m=+1123.361753129" watchObservedRunningTime="2025-10-13 07:05:15.686250779 +0000 UTC m=+1123.373695961" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.704301 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts" (OuterVolumeSpecName: "scripts") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.705555 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj" (OuterVolumeSpecName: "kube-api-access-62chj") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "kube-api-access-62chj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.711031 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "local-storage11-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.762041 4664 scope.go:117] "RemoveContainer" containerID="0057dffe25092120226f3568a6fa3979743a1a3670deb37d0c927a082a6587f7" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.777263 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62chj\" (UniqueName: \"kubernetes.io/projected/ff491711-d62f-403e-8e23-3fa1d4137432-kube-api-access-62chj\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.777305 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.777317 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.777349 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Oct 13 07:05:15 crc kubenswrapper[4664]: I1013 07:05:15.777361 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ff491711-d62f-403e-8e23-3fa1d4137432-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.152647 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.197561 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.206172 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.299736 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.333754 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.371055 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data" (OuterVolumeSpecName: "config-data") pod "ff491711-d62f-403e-8e23-3fa1d4137432" (UID: "ff491711-d62f-403e-8e23-3fa1d4137432"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.401536 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.401569 4664 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff491711-d62f-403e-8e23-3fa1d4137432-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.505712 4664 scope.go:117] "RemoveContainer" containerID="ff665f3fc9ab4ed4b95c7d9e6e27533265c4c868ec292889fd035852a37009a4" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.528422 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.650166 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerStarted","Data":"a2d1650f8bdc2bc56d784d99946476e9bbfb03bee8d96a79b97b88e36335666c"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.662218 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b9b87ff8-xsb2c" event={"ID":"23693d9c-d47d-406d-864d-fa1c94b8f381","Type":"ContainerStarted","Data":"f295aa7b4dc90d0bf6b24862c8f97046aa3f0de611d64ad2ab988ab8aad2c022"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.663023 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.673656 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7445f97b5f-k8zxs" event={"ID":"b4132324-29a1-4d67-91a2-9b7ec6a7c960","Type":"ContainerStarted","Data":"a11621006e1a16656d817022361f141892c3b0fa320b30d9b4fb51662e117128"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.676390 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dzd5z" event={"ID":"20839d87-6d9f-4137-a447-9da5d1523e9c","Type":"ContainerStarted","Data":"b6a0deecec6269a6d45e1e88d1ca369326cc2d8d60fc22f87908437506b1a50d"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.679916 4664 generic.go:334] "Generic (PLEG): container finished" podID="417f8a42-5f63-4637-9644-ebb89537f1be" containerID="f3add747053c2305d656a0f13010a26e53b8bfeea8f17c2ef3ce8e2742fa3780" exitCode=0 Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.679971 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-tdsgr" event={"ID":"417f8a42-5f63-4637-9644-ebb89537f1be","Type":"ContainerDied","Data":"f3add747053c2305d656a0f13010a26e53b8bfeea8f17c2ef3ce8e2742fa3780"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.684596 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-9c86db6bb-5dfhm" event={"ID":"d2d2de6d-b7be-4b17-a73c-33d7dacf228c","Type":"ContainerStarted","Data":"341d5d94950e6aa9201c45038ec00f522d45d64641c10fc3255844b4a0c5652a"} Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.690109 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97b5bee2-4876-4d80-97cf-53c8d91521d9","Type":"ContainerStarted","Data":"ab79bdbc06e5019ef1fcdac4367cf32d3fd0d0ad8a6e0325d5a41bee392cd545"} Oct 13 07:05:16 crc 
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.693379 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" event={"ID":"9762d9c5-1f78-4001-93dd-fb76914d6523","Type":"ContainerStarted","Data":"62df895ee6c46483856db055475cfa80c42bc30375cb9f3ca5d4859c4560412f"}
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.712233 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-66b9b87ff8-xsb2c" podStartSLOduration=8.712212869 podStartE2EDuration="8.712212869s" podCreationTimestamp="2025-10-13 07:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:16.691972985 +0000 UTC m=+1124.379418197" watchObservedRunningTime="2025-10-13 07:05:16.712212869 +0000 UTC m=+1124.399658061"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.727354 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.739394 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.759910 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:05:16 crc kubenswrapper[4664]: E1013 07:05:16.760280 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-log"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.760297 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-log"
Oct 13 07:05:16 crc kubenswrapper[4664]: E1013 07:05:16.760315 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-httpd"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.760322 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-httpd"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.760516 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-httpd"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.760534 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" containerName="glance-log"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.761698 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.766843 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.767020 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.775729 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909501 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909549 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909581 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909616 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-logs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909640 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bhrz\" (UniqueName: \"kubernetes.io/projected/1c5e42f4-75e5-4e9e-97b6-dda31c400142-kube-api-access-5bhrz\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909697 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909749 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:16 crc kubenswrapper[4664]: I1013 07:05:16.909764 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.011853 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.011912 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.011954 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.011999 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-logs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.012030 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bhrz\" (UniqueName: \"kubernetes.io/projected/1c5e42f4-75e5-4e9e-97b6-dda31c400142-kube-api-access-5bhrz\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.012103 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.012173 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.012194 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.012915 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.016246 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-logs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.016785 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c5e42f4-75e5-4e9e-97b6-dda31c400142-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.022563 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.026343 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.030008 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.049517 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c5e42f4-75e5-4e9e-97b6-dda31c400142-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.065689 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bhrz\" (UniqueName: \"kubernetes.io/projected/1c5e42f4-75e5-4e9e-97b6-dda31c400142-kube-api-access-5bhrz\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.095421 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"1c5e42f4-75e5-4e9e-97b6-dda31c400142\") " pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.096494 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff491711-d62f-403e-8e23-3fa1d4137432" path="/var/lib/kubelet/pods/ff491711-d62f-403e-8e23-3fa1d4137432/volumes"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.161614 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.726924 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerStarted","Data":"01deeb5168e7be8e66bf260ddfb0f6d95539abda34fdf84818752bd2938dd98a"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.763265 4664 generic.go:334] "Generic (PLEG): container finished" podID="bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" containerID="65822424319ed6996a7179ceb3ff1a384c1b7298deb08fe7c822bf57ff34ce99" exitCode=0
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.763590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jw2p6" event={"ID":"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e","Type":"ContainerDied","Data":"65822424319ed6996a7179ceb3ff1a384c1b7298deb08fe7c822bf57ff34ce99"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.783144 4664 generic.go:334] "Generic (PLEG): container finished" podID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerID="a823195ecd22bc4556cc7762ec500e6b7c53ada4c06e4adf855c084eaadabbe7" exitCode=1
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.783202 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" event={"ID":"9762d9c5-1f78-4001-93dd-fb76914d6523","Type":"ContainerDied","Data":"a823195ecd22bc4556cc7762ec500e6b7c53ada4c06e4adf855c084eaadabbe7"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.783827 4664 scope.go:117] "RemoveContainer" containerID="a823195ecd22bc4556cc7762ec500e6b7c53ada4c06e4adf855c084eaadabbe7"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.796461 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-9c86db6bb-5dfhm" event={"ID":"d2d2de6d-b7be-4b17-a73c-33d7dacf228c","Type":"ContainerStarted","Data":"1f797d9fe33ce96e6cc56aaf405b9b0b0bd5ecbc2e607e2842f79c93edfd5ef4"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.799555 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.834389 4664 generic.go:334] "Generic (PLEG): container finished" podID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerID="6bb414244b0aef16916b1e016a4cf9f5b80ebbba2715b00016ad8e3f396e7d58" exitCode=1
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.835003 4664 scope.go:117] "RemoveContainer" containerID="6bb414244b0aef16916b1e016a4cf9f5b80ebbba2715b00016ad8e3f396e7d58"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.835335 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9df6477f-n8297" event={"ID":"6136e7f8-cad7-44a1-b237-70f4e309366c","Type":"ContainerDied","Data":"6bb414244b0aef16916b1e016a4cf9f5b80ebbba2715b00016ad8e3f396e7d58"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.864519 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-564db64b9c-vkx7h" event={"ID":"10a9562e-4b1b-4183-bb7a-c3f934109c1d","Type":"ContainerStarted","Data":"e23916b38768550d05c4d380c7aeaa857643f6cec6fe2742e4f70e1b9921cf9a"}
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.865335 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-564db64b9c-vkx7h"
Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.894347 4664 generic.go:334] "Generic (PLEG): container finished" podID="20839d87-6d9f-4137-a447-9da5d1523e9c" containerID="26d329577c21c19446751da68a7af1193fe877154655e89822ddb6890b610a34" exitCode=0
container finished" podID="20839d87-6d9f-4137-a447-9da5d1523e9c" containerID="26d329577c21c19446751da68a7af1193fe877154655e89822ddb6890b610a34" exitCode=0 Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.894746 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dzd5z" event={"ID":"20839d87-6d9f-4137-a447-9da5d1523e9c","Type":"ContainerDied","Data":"26d329577c21c19446751da68a7af1193fe877154655e89822ddb6890b610a34"} Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.903262 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-9c86db6bb-5dfhm" podStartSLOduration=11.903243195 podStartE2EDuration="11.903243195s" podCreationTimestamp="2025-10-13 07:05:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:17.870971286 +0000 UTC m=+1125.558416488" watchObservedRunningTime="2025-10-13 07:05:17.903243195 +0000 UTC m=+1125.590688377" Oct 13 07:05:17 crc kubenswrapper[4664]: I1013 07:05:17.934446 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-564db64b9c-vkx7h" podStartSLOduration=9.934422914 podStartE2EDuration="9.934422914s" podCreationTimestamp="2025-10-13 07:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:17.911641761 +0000 UTC m=+1125.599086963" watchObservedRunningTime="2025-10-13 07:05:17.934422914 +0000 UTC m=+1125.621868106" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.067915 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.386360 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-tdsgr" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.488588 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7hlb\" (UniqueName: \"kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb\") pod \"417f8a42-5f63-4637-9644-ebb89537f1be\" (UID: \"417f8a42-5f63-4637-9644-ebb89537f1be\") " Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.494257 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb" (OuterVolumeSpecName: "kube-api-access-l7hlb") pod "417f8a42-5f63-4637-9644-ebb89537f1be" (UID: "417f8a42-5f63-4637-9644-ebb89537f1be"). InnerVolumeSpecName "kube-api-access-l7hlb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.596613 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7hlb\" (UniqueName: \"kubernetes.io/projected/417f8a42-5f63-4637-9644-ebb89537f1be-kube-api-access-l7hlb\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.909860 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97b5bee2-4876-4d80-97cf-53c8d91521d9","Type":"ContainerStarted","Data":"eaa588b0f0ef6b662acbc23079d9c3486e02825578809f164e027742c6a556ec"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.916395 4664 generic.go:334] "Generic (PLEG): container finished" podID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerID="edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0" exitCode=137 Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.916492 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.916712 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.922278 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-tdsgr" event={"ID":"417f8a42-5f63-4637-9644-ebb89537f1be","Type":"ContainerDied","Data":"a83e01897584b3ebc55f3f071d360db7f756f332a0409dde17e374074767ade1"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.922374 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a83e01897584b3ebc55f3f071d360db7f756f332a0409dde17e374074767ade1" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.922478 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-tdsgr" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.932821 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerStarted","Data":"5a208df358f19c4fd4ffef33860234881da66d706f5343ffbbdd7f142b9d7824"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.939431 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c5e42f4-75e5-4e9e-97b6-dda31c400142","Type":"ContainerStarted","Data":"3a4c61763a325a309fe45f0bb52b7a03e1024be074922e7eb918ee2c829a7bcb"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.951852 4664 generic.go:334] "Generic (PLEG): container finished" podID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerID="6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b" exitCode=137 Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.951900 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerDied","Data":"6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.952201 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.959304 4664 generic.go:334] "Generic (PLEG): container finished" podID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerID="87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf" exitCode=1 Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.959404 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" event={"ID":"9762d9c5-1f78-4001-93dd-fb76914d6523","Type":"ContainerDied","Data":"87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf"} Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.959438 4664 scope.go:117] "RemoveContainer" containerID="a823195ecd22bc4556cc7762ec500e6b7c53ada4c06e4adf855c084eaadabbe7" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.959874 4664 scope.go:117] "RemoveContainer" containerID="87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf" Oct 13 07:05:18 crc kubenswrapper[4664]: E1013 07:05:18.960118 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-754ccf95b4-8cxkq_openstack(9762d9c5-1f78-4001-93dd-fb76914d6523)\"" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.963805 4664 generic.go:334] "Generic (PLEG): container finished" podID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95" exitCode=1 Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.964758 4664 scope.go:117] "RemoveContainer" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95" Oct 13 07:05:18 crc kubenswrapper[4664]: E1013 07:05:18.964945 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s 
restarting failed container=heat-api pod=heat-api-9df6477f-n8297_openstack(6136e7f8-cad7-44a1-b237-70f4e309366c)\"" pod="openstack/heat-api-9df6477f-n8297" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" Oct 13 07:05:18 crc kubenswrapper[4664]: I1013 07:05:18.964974 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9df6477f-n8297" event={"ID":"6136e7f8-cad7-44a1-b237-70f4e309366c","Type":"ContainerDied","Data":"95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95"} Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.051593 4664 scope.go:117] "RemoveContainer" containerID="6bb414244b0aef16916b1e016a4cf9f5b80ebbba2715b00016ad8e3f396e7d58" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.519309 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jw2p6" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.649209 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cf9j\" (UniqueName: \"kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j\") pod \"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e\" (UID: \"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e\") " Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.653986 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j" (OuterVolumeSpecName: "kube-api-access-2cf9j") pod "bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" (UID: "bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e"). InnerVolumeSpecName "kube-api-access-2cf9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.753004 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cf9j\" (UniqueName: \"kubernetes.io/projected/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e-kube-api-access-2cf9j\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.767741 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dzd5z" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.855482 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrnjl\" (UniqueName: \"kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl\") pod \"20839d87-6d9f-4137-a447-9da5d1523e9c\" (UID: \"20839d87-6d9f-4137-a447-9da5d1523e9c\") " Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.886832 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl" (OuterVolumeSpecName: "kube-api-access-xrnjl") pod "20839d87-6d9f-4137-a447-9da5d1523e9c" (UID: "20839d87-6d9f-4137-a447-9da5d1523e9c"). InnerVolumeSpecName "kube-api-access-xrnjl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:19 crc kubenswrapper[4664]: I1013 07:05:19.958040 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrnjl\" (UniqueName: \"kubernetes.io/projected/20839d87-6d9f-4137-a447-9da5d1523e9c-kube-api-access-xrnjl\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.007406 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97b5bee2-4876-4d80-97cf-53c8d91521d9","Type":"ContainerStarted","Data":"08104b342dd40c748a13d7d6787696e9fe7fc2c1f9aaaf164afc6416ca63ad7c"} Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.016037 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dzd5z" event={"ID":"20839d87-6d9f-4137-a447-9da5d1523e9c","Type":"ContainerDied","Data":"b6a0deecec6269a6d45e1e88d1ca369326cc2d8d60fc22f87908437506b1a50d"} Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.016083 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6a0deecec6269a6d45e1e88d1ca369326cc2d8d60fc22f87908437506b1a50d" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.016143 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dzd5z" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.035119 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c5e42f4-75e5-4e9e-97b6-dda31c400142","Type":"ContainerStarted","Data":"c4c4229e40432d55ddc15cb312cefc510b347338882739a5b6f8d975a14fb1f5"} Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.052950 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jw2p6" event={"ID":"bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e","Type":"ContainerDied","Data":"52c088932f983e2c8fe4fcf31e3aac94c4ec99d48c62f4cf1d669165f533b862"} Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.052991 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52c088932f983e2c8fe4fcf31e3aac94c4ec99d48c62f4cf1d669165f533b862" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.053063 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-jw2p6" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.062929 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.062909907 podStartE2EDuration="6.062909907s" podCreationTimestamp="2025-10-13 07:05:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:20.035424958 +0000 UTC m=+1127.722870160" watchObservedRunningTime="2025-10-13 07:05:20.062909907 +0000 UTC m=+1127.750355099" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.070043 4664 scope.go:117] "RemoveContainer" containerID="87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf" Oct 13 07:05:20 crc kubenswrapper[4664]: E1013 07:05:20.070255 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-754ccf95b4-8cxkq_openstack(9762d9c5-1f78-4001-93dd-fb76914d6523)\"" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.138124 4664 scope.go:117] "RemoveContainer" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95" Oct 13 07:05:20 crc kubenswrapper[4664]: E1013 07:05:20.138545 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9df6477f-n8297_openstack(6136e7f8-cad7-44a1-b237-70f4e309366c)\"" pod="openstack/heat-api-9df6477f-n8297" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" Oct 13 07:05:20 crc kubenswrapper[4664]: I1013 07:05:20.260317 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:05:21 crc kubenswrapper[4664]: I1013 07:05:21.147385 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerStarted","Data":"b7389ea9df97006592d64758102acde659d2f4e970fa605afce1832a3b050b30"} Oct 13 07:05:21 crc kubenswrapper[4664]: I1013 07:05:21.150720 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c5e42f4-75e5-4e9e-97b6-dda31c400142","Type":"ContainerStarted","Data":"16ae52f6cef36895fe5a5a7c0aa6125a8f3f4c7ae643258f3e9509ca59991634"} Oct 13 07:05:21 crc kubenswrapper[4664]: I1013 07:05:21.176834 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.176818367 podStartE2EDuration="5.176818367s" podCreationTimestamp="2025-10-13 07:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:21.171233566 +0000 UTC m=+1128.858678758" watchObservedRunningTime="2025-10-13 07:05:21.176818367 +0000 UTC m=+1128.864263559" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.087067 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7445f97b5f-k8zxs" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.099540 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/swift-proxy-7445f97b5f-k8zxs" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.175388 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerStarted","Data":"5490e7cfed70127d2f2deb6891567455e2d58f712e2d0aa05a41ba58a65170bd"} Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.199481 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.260026565 podStartE2EDuration="9.199463788s" podCreationTimestamp="2025-10-13 07:05:13 +0000 UTC" firstStartedPulling="2025-10-13 07:05:15.538969213 +0000 UTC m=+1123.226414395" lastFinishedPulling="2025-10-13 07:05:21.478406416 +0000 UTC m=+1129.165851618" observedRunningTime="2025-10-13 07:05:22.195042989 +0000 UTC m=+1129.882488181" watchObservedRunningTime="2025-10-13 07:05:22.199463788 +0000 UTC m=+1129.886908980" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.279061 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-9df6477f-n8297" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.279704 4664 scope.go:117] "RemoveContainer" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95" Oct 13 07:05:22 crc kubenswrapper[4664]: E1013 07:05:22.279917 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9df6477f-n8297_openstack(6136e7f8-cad7-44a1-b237-70f4e309366c)\"" pod="openstack/heat-api-9df6477f-n8297" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.280229 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-9df6477f-n8297" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.322510 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.322551 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.323180 4664 scope.go:117] "RemoveContainer" containerID="87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf" Oct 13 07:05:22 crc kubenswrapper[4664]: E1013 07:05:22.323402 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-754ccf95b4-8cxkq_openstack(9762d9c5-1f78-4001-93dd-fb76914d6523)\"" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.750757 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5758f76974-8cm64" Oct 13 07:05:22 crc kubenswrapper[4664]: I1013 07:05:22.766717 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:05:23 crc kubenswrapper[4664]: I1013 07:05:23.183225 4664 scope.go:117] "RemoveContainer" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95" Oct 13 07:05:23 crc kubenswrapper[4664]: E1013 07:05:23.183427 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9df6477f-n8297_openstack(6136e7f8-cad7-44a1-b237-70f4e309366c)\"" pod="openstack/heat-api-9df6477f-n8297" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" Oct 13 07:05:23 crc kubenswrapper[4664]: I1013 07:05:23.183970 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.028667 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.203351 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-central-agent" containerID="cri-o://01deeb5168e7be8e66bf260ddfb0f6d95539abda34fdf84818752bd2938dd98a" gracePeriod=30 Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.203892 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-notification-agent" containerID="cri-o://5a208df358f19c4fd4ffef33860234881da66d706f5343ffbbdd7f142b9d7824" gracePeriod=30 Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.203867 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="sg-core" containerID="cri-o://b7389ea9df97006592d64758102acde659d2f4e970fa605afce1832a3b050b30" gracePeriod=30 Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.203940 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="proxy-httpd" containerID="cri-o://5490e7cfed70127d2f2deb6891567455e2d58f712e2d0aa05a41ba58a65170bd" gracePeriod=30 Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.562825 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.562876 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.619326 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:25 crc kubenswrapper[4664]: I1013 07:05:25.667847 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.216362 4664 generic.go:334] "Generic (PLEG): container finished" podID="869b5463-3457-4467-b860-33f66c30e334" containerID="5490e7cfed70127d2f2deb6891567455e2d58f712e2d0aa05a41ba58a65170bd" exitCode=0 Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.216393 4664 generic.go:334] "Generic (PLEG): container finished" podID="869b5463-3457-4467-b860-33f66c30e334" containerID="b7389ea9df97006592d64758102acde659d2f4e970fa605afce1832a3b050b30" exitCode=2 Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.216404 4664 generic.go:334] "Generic (PLEG): container finished" podID="869b5463-3457-4467-b860-33f66c30e334" containerID="5a208df358f19c4fd4ffef33860234881da66d706f5343ffbbdd7f142b9d7824" exitCode=0 Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.217624 4664 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerDied","Data":"5490e7cfed70127d2f2deb6891567455e2d58f712e2d0aa05a41ba58a65170bd"} Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.217676 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.217692 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.217702 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerDied","Data":"b7389ea9df97006592d64758102acde659d2f4e970fa605afce1832a3b050b30"} Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.217716 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerDied","Data":"5a208df358f19c4fd4ffef33860234881da66d706f5343ffbbdd7f142b9d7824"} Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.633263 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-a1f7-account-create-khkpz"] Oct 13 07:05:26 crc kubenswrapper[4664]: E1013 07:05:26.634048 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20839d87-6d9f-4137-a447-9da5d1523e9c" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634065 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="20839d87-6d9f-4137-a447-9da5d1523e9c" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: E1013 07:05:26.634082 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="417f8a42-5f63-4637-9644-ebb89537f1be" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634088 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="417f8a42-5f63-4637-9644-ebb89537f1be" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: E1013 07:05:26.634115 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634121 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634295 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634321 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="417f8a42-5f63-4637-9644-ebb89537f1be" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634333 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="20839d87-6d9f-4137-a447-9da5d1523e9c" containerName="mariadb-database-create" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.634928 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.644686 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.665163 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a1f7-account-create-khkpz"] Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.691819 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnx5f\" (UniqueName: \"kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f\") pod \"nova-api-a1f7-account-create-khkpz\" (UID: \"00359356-4b01-4fe2-9818-1aee78df254c\") " pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.760754 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-564db64b9c-vkx7h" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.794171 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnx5f\" (UniqueName: \"kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f\") pod \"nova-api-a1f7-account-create-khkpz\" (UID: \"00359356-4b01-4fe2-9818-1aee78df254c\") " pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.801201 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-66b9b87ff8-xsb2c" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.890054 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnx5f\" (UniqueName: \"kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f\") pod \"nova-api-a1f7-account-create-khkpz\" (UID: \"00359356-4b01-4fe2-9818-1aee78df254c\") " pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.929855 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"] Oct 13 07:05:26 crc kubenswrapper[4664]: I1013 07:05:26.966735 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.043814 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a819-account-create-25fxh"] Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.045440 4664 util.go:30] "No sandbox for pod can be found. 
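The kube-api-access-* volumes being attached and mounted here are admission-injected projected service-account volumes. As an illustration of what such a volume typically projects (a bound token, the cluster CA bundle, and the pod namespace), here is a sketch using the k8s.io/api types; the 3607s expiry and item layout are assumed defaults, not values read from this cluster:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        expiry := int64(3607) // typical default for admission-injected tokens (assumption)

        vol := corev1.Volume{
            Name: "kube-api-access-bnx5f",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{
                        // Bound service-account token, rotated by kubelet.
                        {ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
                            Path:              "token",
                            ExpirationSeconds: &expiry,
                        }},
                        // Cluster CA bundle for verifying the API server.
                        {ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
                            Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
                        }},
                        // Pod namespace via the downward API.
                        {DownwardAPI: &corev1.DownwardAPIProjection{
                            Items: []corev1.DownwardAPIVolumeFile{{
                                Path:     "namespace",
                                FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
                            }},
                        }},
                    },
                },
            },
        }
        fmt.Printf("%s projects %d sources\n", vol.Name, len(vol.VolumeSource.Projected.Sources))
    }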
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.050026 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.101187 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a819-account-create-25fxh"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.101220 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.108006 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbcd8\" (UniqueName: \"kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8\") pod \"nova-cell0-a819-account-create-25fxh\" (UID: \"7d38225c-2fec-4695-8e19-6518db14c972\") " pod="openstack/nova-cell0-a819-account-create-25fxh"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.163224 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.165466 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.196534 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-9c3b-account-create-5kwlq"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.197784 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9c3b-account-create-5kwlq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.207665 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.209975 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbcd8\" (UniqueName: \"kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8\") pod \"nova-cell0-a819-account-create-25fxh\" (UID: \"7d38225c-2fec-4695-8e19-6518db14c972\") " pod="openstack/nova-cell0-a819-account-create-25fxh"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.212586 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9c3b-account-create-5kwlq"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.263415 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.309467 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-9c86db6bb-5dfhm"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.312743 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ldk7\" (UniqueName: \"kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7\") pod \"nova-cell1-9c3b-account-create-5kwlq\" (UID: \"e3cda89c-8f77-40c1-9333-fb92c9e78f02\") " pod="openstack/nova-cell1-9c3b-account-create-5kwlq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.324070 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.330218 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbcd8\" (UniqueName: \"kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8\") pod \"nova-cell0-a819-account-create-25fxh\" (UID: \"7d38225c-2fec-4695-8e19-6518db14c972\") " pod="openstack/nova-cell0-a819-account-create-25fxh"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.361066 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.414270 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ldk7\" (UniqueName: \"kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7\") pod \"nova-cell1-9c3b-account-create-5kwlq\" (UID: \"e3cda89c-8f77-40c1-9333-fb92c9e78f02\") " pod="openstack/nova-cell1-9c3b-account-create-5kwlq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.440451 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.440679 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6657f6fcdb-mmtx6" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" containerID="cri-o://4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" gracePeriod=60
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.461445 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ldk7\" (UniqueName: \"kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7\") pod \"nova-cell1-9c3b-account-create-5kwlq\" (UID: \"e3cda89c-8f77-40c1-9333-fb92c9e78f02\") " pod="openstack/nova-cell1-9c3b-account-create-5kwlq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.509836 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a819-account-create-25fxh"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.641358 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9c3b-account-create-5kwlq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.683653 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.738223 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9df6477f-n8297"
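"Killing container with a grace period" (gracePeriod=30 for the ceilometer containers earlier, 60 for heat-engine here) means the runtime signals the container's init process and escalates to a hard kill only if the grace period expires first. A minimal sketch of that contract against a plain process; CRI-O does the real work through its runtime, and stopWithGrace is this note's own helper, not a kubelet API:

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // stopWithGrace: send SIGTERM, wait up to gracePeriod, then SIGKILL.
    func stopWithGrace(cmd *exec.Cmd, gracePeriod time.Duration) error {
        if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case err := <-done:
            return err // exited within the grace period
        case <-time.After(gracePeriod):
            _ = cmd.Process.Kill() // grace period expired: SIGKILL
            return <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "120")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println(stopWithGrace(cmd, 2*time.Second))
    }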
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.739326 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8487d6c5d4-cgnm9"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.740387 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.744152 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxjz9\" (UniqueName: \"kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9\") pod \"9762d9c5-1f78-4001-93dd-fb76914d6523\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.744248 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle\") pod \"9762d9c5-1f78-4001-93dd-fb76914d6523\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.744309 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom\") pod \"9762d9c5-1f78-4001-93dd-fb76914d6523\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.744328 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data\") pod \"9762d9c5-1f78-4001-93dd-fb76914d6523\" (UID: \"9762d9c5-1f78-4001-93dd-fb76914d6523\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.773236 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9762d9c5-1f78-4001-93dd-fb76914d6523" (UID: "9762d9c5-1f78-4001-93dd-fb76914d6523"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.775338 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9" (OuterVolumeSpecName: "kube-api-access-sxjz9") pod "9762d9c5-1f78-4001-93dd-fb76914d6523" (UID: "9762d9c5-1f78-4001-93dd-fb76914d6523"). InnerVolumeSpecName "kube-api-access-sxjz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.800811 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9762d9c5-1f78-4001-93dd-fb76914d6523" (UID: "9762d9c5-1f78-4001-93dd-fb76914d6523"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.845526 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data\") pod \"6136e7f8-cad7-44a1-b237-70f4e309366c\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.845766 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle\") pod \"6136e7f8-cad7-44a1-b237-70f4e309366c\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.845934 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pqpz\" (UniqueName: \"kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz\") pod \"6136e7f8-cad7-44a1-b237-70f4e309366c\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.846156 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom\") pod \"6136e7f8-cad7-44a1-b237-70f4e309366c\" (UID: \"6136e7f8-cad7-44a1-b237-70f4e309366c\") "
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.847401 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.847478 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data-custom\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.847543 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxjz9\" (UniqueName: \"kubernetes.io/projected/9762d9c5-1f78-4001-93dd-fb76914d6523-kube-api-access-sxjz9\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.858165 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz" (OuterVolumeSpecName: "kube-api-access-4pqpz") pod "6136e7f8-cad7-44a1-b237-70f4e309366c" (UID: "6136e7f8-cad7-44a1-b237-70f4e309366c"). InnerVolumeSpecName "kube-api-access-4pqpz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.864179 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6136e7f8-cad7-44a1-b237-70f4e309366c" (UID: "6136e7f8-cad7-44a1-b237-70f4e309366c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.871165 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.871467 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7d78c558d-rjg4v"
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.875967 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data" (OuterVolumeSpecName: "config-data") pod "9762d9c5-1f78-4001-93dd-fb76914d6523" (UID: "9762d9c5-1f78-4001-93dd-fb76914d6523"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.896727 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a1f7-account-create-khkpz"]
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.936144 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6136e7f8-cad7-44a1-b237-70f4e309366c" (UID: "6136e7f8-cad7-44a1-b237-70f4e309366c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.945850 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data" (OuterVolumeSpecName: "config-data") pod "6136e7f8-cad7-44a1-b237-70f4e309366c" (UID: "6136e7f8-cad7-44a1-b237-70f4e309366c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.949212 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.949234 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.949245 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pqpz\" (UniqueName: \"kubernetes.io/projected/6136e7f8-cad7-44a1-b237-70f4e309366c-kube-api-access-4pqpz\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.949256 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9762d9c5-1f78-4001-93dd-fb76914d6523-config-data\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:27 crc kubenswrapper[4664]: I1013 07:05:27.949266 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6136e7f8-cad7-44a1-b237-70f4e309366c-config-data-custom\") on node \"crc\" DevicePath \"\""
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.341721 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq" event={"ID":"9762d9c5-1f78-4001-93dd-fb76914d6523","Type":"ContainerDied","Data":"62df895ee6c46483856db055475cfa80c42bc30375cb9f3ca5d4859c4560412f"}
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.342047 4664 scope.go:117] "RemoveContainer" containerID="87b081852245ab31dbae266f4191547ff1ecd6b59cab684af8285d95df87e4cf"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.342178 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-754ccf95b4-8cxkq"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.357995 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a819-account-create-25fxh"]
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.358991 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f7-account-create-khkpz" event={"ID":"00359356-4b01-4fe2-9818-1aee78df254c","Type":"ContainerStarted","Data":"559ef0765609e67603d1204a8d3c67f61fa2b22d2d33ae2350297a7222c9258b"}
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.359037 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f7-account-create-khkpz" event={"ID":"00359356-4b01-4fe2-9818-1aee78df254c","Type":"ContainerStarted","Data":"5e9c9f1ad11177eee28a8538253781f7aec71762f7f88c79e4742ab9b568f3c1"}
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.364484 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9df6477f-n8297" event={"ID":"6136e7f8-cad7-44a1-b237-70f4e309366c","Type":"ContainerDied","Data":"82a27cca59003fdd0b42be0848a0327b33bef9614ac89d1470655d26432afd20"}
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.364581 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9df6477f-n8297"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.382389 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9c3b-account-create-5kwlq"]
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.388999 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-a1f7-account-create-khkpz" podStartSLOduration=2.388979874 podStartE2EDuration="2.388979874s" podCreationTimestamp="2025-10-13 07:05:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:05:28.375699017 +0000 UTC m=+1136.063144199" watchObservedRunningTime="2025-10-13 07:05:28.388979874 +0000 UTC m=+1136.076425066"
Oct 13 07:05:28 crc kubenswrapper[4664]: W1013 07:05:28.392680 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3cda89c_8f77_40c1_9333_fb92c9e78f02.slice/crio-ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b WatchSource:0}: Error finding container ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b: Status 404 returned error can't find the container with id ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.393005 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a819-account-create-25fxh" event={"ID":"7d38225c-2fec-4695-8e19-6518db14c972","Type":"ContainerStarted","Data":"660446029f7df1c46dab1848461eb26793b1ac6e9d73c3a0883772570b2177fb"}
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.393184 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.393375 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.394753 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.444917 4664 scope.go:117] "RemoveContainer" containerID="95662435cab9d08c88f07af41423bd29ae10bcd2886bd068e242d027911eaa95"
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.518220 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"]
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.581969 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-754ccf95b4-8cxkq"]
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.616055 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:28 crc kubenswrapper[4664]: I1013 07:05:28.630964 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-9df6477f-n8297"]
Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.059140 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" path="/var/lib/kubelet/pods/6136e7f8-cad7-44a1-b237-70f4e309366c/volumes"
Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.059921 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" path="/var/lib/kubelet/pods/9762d9c5-1f78-4001-93dd-fb76914d6523/volumes"
Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.404263 4664 generic.go:334] "Generic (PLEG): container finished" podID="00359356-4b01-4fe2-9818-1aee78df254c" containerID="559ef0765609e67603d1204a8d3c67f61fa2b22d2d33ae2350297a7222c9258b" exitCode=0
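The pod_startup_latency_tracker entries can be re-derived from their own fields: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally subtracts image-pull time (zero for the account-create pods, whose pull timestamps are the zero value "0001-01-01"). A sketch recomputing the ceilometer-0 numbers logged earlier, with the monotonic-clock "m=+..." suffixes stripped; tiny drift versus the logged SLO value is expected because kubelet subtracts monotonic readings:

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        // Timestamps copied from the ceilometer-0 startup-latency entry above.
        created := mustParse("2025-10-13 07:05:13 +0000 UTC")
        firstPull := mustParse("2025-10-13 07:05:15.538969213 +0000 UTC")
        lastPull := mustParse("2025-10-13 07:05:21.478406416 +0000 UTC")
        running := mustParse("2025-10-13 07:05:22.199463788 +0000 UTC")

        e2e := running.Sub(created)          // podStartE2EDuration: 9.199463788s
        slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: e2e minus pull time
        fmt.Println("e2e:", e2e, "slo:", slo)
    }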
"Generic (PLEG): container finished" podID="00359356-4b01-4fe2-9818-1aee78df254c" containerID="559ef0765609e67603d1204a8d3c67f61fa2b22d2d33ae2350297a7222c9258b" exitCode=0 Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.404324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f7-account-create-khkpz" event={"ID":"00359356-4b01-4fe2-9818-1aee78df254c","Type":"ContainerDied","Data":"559ef0765609e67603d1204a8d3c67f61fa2b22d2d33ae2350297a7222c9258b"} Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.407910 4664 generic.go:334] "Generic (PLEG): container finished" podID="e3cda89c-8f77-40c1-9333-fb92c9e78f02" containerID="fcb0cacd9b6c27413c44f3f5e9f159eef541d821f381b60a10437e83dc8886f7" exitCode=0 Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.407997 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9c3b-account-create-5kwlq" event={"ID":"e3cda89c-8f77-40c1-9333-fb92c9e78f02","Type":"ContainerDied","Data":"fcb0cacd9b6c27413c44f3f5e9f159eef541d821f381b60a10437e83dc8886f7"} Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.408025 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9c3b-account-create-5kwlq" event={"ID":"e3cda89c-8f77-40c1-9333-fb92c9e78f02","Type":"ContainerStarted","Data":"ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b"} Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.409653 4664 generic.go:334] "Generic (PLEG): container finished" podID="7d38225c-2fec-4695-8e19-6518db14c972" containerID="362a329a787f3cfb802401f618eda0ad9ee4f1d21430a7dca4c88a1bc01b5706" exitCode=0 Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.409701 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a819-account-create-25fxh" event={"ID":"7d38225c-2fec-4695-8e19-6518db14c972","Type":"ContainerDied","Data":"362a329a787f3cfb802401f618eda0ad9ee4f1d21430a7dca4c88a1bc01b5706"} Oct 13 07:05:29 crc kubenswrapper[4664]: I1013 07:05:29.409757 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:05:30 crc kubenswrapper[4664]: E1013 07:05:30.091807 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:30 crc kubenswrapper[4664]: E1013 07:05:30.096097 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:30 crc kubenswrapper[4664]: E1013 07:05:30.100681 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:30 crc kubenswrapper[4664]: E1013 07:05:30.100733 4664 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" 
pod="openstack/heat-engine-6657f6fcdb-mmtx6" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" Oct 13 07:05:30 crc kubenswrapper[4664]: I1013 07:05:30.992744 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9c3b-account-create-5kwlq" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.119478 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ldk7\" (UniqueName: \"kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7\") pod \"e3cda89c-8f77-40c1-9333-fb92c9e78f02\" (UID: \"e3cda89c-8f77-40c1-9333-fb92c9e78f02\") " Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.135992 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7" (OuterVolumeSpecName: "kube-api-access-9ldk7") pod "e3cda89c-8f77-40c1-9333-fb92c9e78f02" (UID: "e3cda89c-8f77-40c1-9333-fb92c9e78f02"). InnerVolumeSpecName "kube-api-access-9ldk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.222206 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ldk7\" (UniqueName: \"kubernetes.io/projected/e3cda89c-8f77-40c1-9333-fb92c9e78f02-kube-api-access-9ldk7\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.227250 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a819-account-create-25fxh" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.248097 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.323499 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbcd8\" (UniqueName: \"kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8\") pod \"7d38225c-2fec-4695-8e19-6518db14c972\" (UID: \"7d38225c-2fec-4695-8e19-6518db14c972\") " Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.323837 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnx5f\" (UniqueName: \"kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f\") pod \"00359356-4b01-4fe2-9818-1aee78df254c\" (UID: \"00359356-4b01-4fe2-9818-1aee78df254c\") " Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.345603 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8" (OuterVolumeSpecName: "kube-api-access-bbcd8") pod "7d38225c-2fec-4695-8e19-6518db14c972" (UID: "7d38225c-2fec-4695-8e19-6518db14c972"). InnerVolumeSpecName "kube-api-access-bbcd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.345709 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f" (OuterVolumeSpecName: "kube-api-access-bnx5f") pod "00359356-4b01-4fe2-9818-1aee78df254c" (UID: "00359356-4b01-4fe2-9818-1aee78df254c"). InnerVolumeSpecName "kube-api-access-bnx5f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.428073 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbcd8\" (UniqueName: \"kubernetes.io/projected/7d38225c-2fec-4695-8e19-6518db14c972-kube-api-access-bbcd8\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.428106 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnx5f\" (UniqueName: \"kubernetes.io/projected/00359356-4b01-4fe2-9818-1aee78df254c-kube-api-access-bnx5f\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.432204 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9c3b-account-create-5kwlq" event={"ID":"e3cda89c-8f77-40c1-9333-fb92c9e78f02","Type":"ContainerDied","Data":"ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b"} Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.432245 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef19219fd270aee362db084b861df5ffc9c2094844aa615c60377f56aa09759b" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.432304 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9c3b-account-create-5kwlq" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.434907 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a819-account-create-25fxh" event={"ID":"7d38225c-2fec-4695-8e19-6518db14c972","Type":"ContainerDied","Data":"660446029f7df1c46dab1848461eb26793b1ac6e9d73c3a0883772570b2177fb"} Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.434946 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="660446029f7df1c46dab1848461eb26793b1ac6e9d73c3a0883772570b2177fb" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.435013 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a819-account-create-25fxh" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.446991 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f7-account-create-khkpz" event={"ID":"00359356-4b01-4fe2-9818-1aee78df254c","Type":"ContainerDied","Data":"5e9c9f1ad11177eee28a8538253781f7aec71762f7f88c79e4742ab9b568f3c1"} Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.447035 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e9c9f1ad11177eee28a8538253781f7aec71762f7f88c79e4742ab9b568f3c1" Oct 13 07:05:31 crc kubenswrapper[4664]: I1013 07:05:31.447062 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-a1f7-account-create-khkpz" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.131325 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.131842 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.163213 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.179999 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.180107 4664 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 13 07:05:34 crc kubenswrapper[4664]: I1013 07:05:34.321264 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 13 07:05:35 crc kubenswrapper[4664]: I1013 07:05:35.511488 4664 generic.go:334] "Generic (PLEG): container finished" podID="869b5463-3457-4467-b860-33f66c30e334" containerID="01deeb5168e7be8e66bf260ddfb0f6d95539abda34fdf84818752bd2938dd98a" exitCode=0 Oct 13 07:05:35 crc kubenswrapper[4664]: I1013 07:05:35.512040 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerDied","Data":"01deeb5168e7be8e66bf260ddfb0f6d95539abda34fdf84818752bd2938dd98a"} Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.063062 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.117325 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.117474 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.117590 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.117998 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.118063 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m67mb\" (UniqueName: \"kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.118479 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.118509 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.118617 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd\") pod \"869b5463-3457-4467-b860-33f66c30e334\" (UID: \"869b5463-3457-4467-b860-33f66c30e334\") " Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.119277 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.119863 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.131968 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts" (OuterVolumeSpecName: "scripts") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.164367 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb" (OuterVolumeSpecName: "kube-api-access-m67mb") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "kube-api-access-m67mb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.224932 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m67mb\" (UniqueName: \"kubernetes.io/projected/869b5463-3457-4467-b860-33f66c30e334-kube-api-access-m67mb\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.224978 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/869b5463-3457-4467-b860-33f66c30e334-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.224990 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.283951 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.293981 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.327115 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.327461 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.349612 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data" (OuterVolumeSpecName: "config-data") pod "869b5463-3457-4467-b860-33f66c30e334" (UID: "869b5463-3457-4467-b860-33f66c30e334"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.429517 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/869b5463-3457-4467-b860-33f66c30e334-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.523589 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"869b5463-3457-4467-b860-33f66c30e334","Type":"ContainerDied","Data":"a2d1650f8bdc2bc56d784d99946476e9bbfb03bee8d96a79b97b88e36335666c"} Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.523639 4664 scope.go:117] "RemoveContainer" containerID="5490e7cfed70127d2f2deb6891567455e2d58f712e2d0aa05a41ba58a65170bd" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.523744 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.564437 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.591824 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.598870 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599220 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="proxy-httpd" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599236 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="proxy-httpd" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599252 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3cda89c-8f77-40c1-9333-fb92c9e78f02" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599259 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3cda89c-8f77-40c1-9333-fb92c9e78f02" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599269 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-central-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599275 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-central-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599286 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599293 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599304 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d38225c-2fec-4695-8e19-6518db14c972" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599310 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d38225c-2fec-4695-8e19-6518db14c972" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599325 4664 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-notification-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599332 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-notification-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599346 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599352 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599364 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="sg-core" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599370 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="sg-core" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599381 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599386 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.599399 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00359356-4b01-4fe2-9818-1aee78df254c" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599405 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="00359356-4b01-4fe2-9818-1aee78df254c" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599566 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599574 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599584 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3cda89c-8f77-40c1-9333-fb92c9e78f02" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599597 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="sg-core" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599606 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d38225c-2fec-4695-8e19-6518db14c972" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599615 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599626 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6136e7f8-cad7-44a1-b237-70f4e309366c" containerName="heat-api" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599638 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="proxy-httpd" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599647 4664 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="00359356-4b01-4fe2-9818-1aee78df254c" containerName="mariadb-account-create" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599673 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-central-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.599691 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="869b5463-3457-4467-b860-33f66c30e334" containerName="ceilometer-notification-agent" Oct 13 07:05:36 crc kubenswrapper[4664]: E1013 07:05:36.600086 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.600097 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9762d9c5-1f78-4001-93dd-fb76914d6523" containerName="heat-cfnapi" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.602074 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.604383 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.604609 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.619105 4664 scope.go:117] "RemoveContainer" containerID="b7389ea9df97006592d64758102acde659d2f4e970fa605afce1832a3b050b30" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.632950 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zghx\" (UniqueName: \"kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633029 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633049 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633069 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633095 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633206 
4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.633228 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.647002 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.694113 4664 scope.go:117] "RemoveContainer" containerID="5a208df358f19c4fd4ffef33860234881da66d706f5343ffbbdd7f142b9d7824" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735237 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735349 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735387 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735447 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zghx\" (UniqueName: \"kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735497 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735515 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.735536 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.738242 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.738453 4664 scope.go:117] "RemoveContainer" containerID="01deeb5168e7be8e66bf260ddfb0f6d95539abda34fdf84818752bd2938dd98a" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.738768 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.753127 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.754643 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.755517 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.758990 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zghx\" (UniqueName: \"kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.759616 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts\") pod \"ceilometer-0\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " pod="openstack/ceilometer-0" Oct 13 07:05:36 crc kubenswrapper[4664]: I1013 07:05:36.933585 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.057879 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869b5463-3457-4467-b860-33f66c30e334" path="/var/lib/kubelet/pods/869b5463-3457-4467-b860-33f66c30e334/volumes" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.606316 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.661317 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8bkjv"] Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.662399 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.664454 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-wc9bh" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.664593 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.665980 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.675627 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8bkjv"] Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.740499 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.865947 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.866098 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6mqt\" (UniqueName: \"kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.866347 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.866449 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.872517 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.968288 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6mqt\" (UniqueName: \"kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: 
\"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.968411 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.968470 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.968535 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.975164 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.976482 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:37 crc kubenswrapper[4664]: I1013 07:05:37.990160 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:38 crc kubenswrapper[4664]: I1013 07:05:38.001345 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6mqt\" (UniqueName: \"kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt\") pod \"nova-cell0-conductor-db-sync-8bkjv\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:38 crc kubenswrapper[4664]: I1013 07:05:38.278122 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:05:38 crc kubenswrapper[4664]: I1013 07:05:38.553809 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerStarted","Data":"0bb91f9917897cf91651c1da2ecd58245235daa1ea380b90176a6833222606d7"} Oct 13 07:05:38 crc kubenswrapper[4664]: I1013 07:05:38.554158 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerStarted","Data":"58dd02c7e9a312f97d34bbb5ecdf288b49caf59b2ac3a614d1afa5c4927bfcb3"} Oct 13 07:05:38 crc kubenswrapper[4664]: I1013 07:05:38.907910 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8bkjv"] Oct 13 07:05:39 crc kubenswrapper[4664]: I1013 07:05:39.581459 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerStarted","Data":"598f23a4ebaab0215f7b1e2aa79b3c861069ffd43514734524273f9368b3590e"} Oct 13 07:05:39 crc kubenswrapper[4664]: I1013 07:05:39.600168 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" event={"ID":"6d138f3a-fa9c-4ae4-873e-a8c335b0635d","Type":"ContainerStarted","Data":"b833a939b7a7394ef50c3e72f02dd6f83918555cd29289070c996bc76f069f78"} Oct 13 07:05:39 crc kubenswrapper[4664]: I1013 07:05:39.713311 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:05:40 crc kubenswrapper[4664]: E1013 07:05:40.102716 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:40 crc kubenswrapper[4664]: E1013 07:05:40.105006 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:40 crc kubenswrapper[4664]: E1013 07:05:40.106343 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Oct 13 07:05:40 crc kubenswrapper[4664]: E1013 07:05:40.106397 4664 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6657f6fcdb-mmtx6" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" Oct 13 07:05:41 crc kubenswrapper[4664]: I1013 07:05:41.627840 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerStarted","Data":"faa10799d7c2c8288cb88136ddf1958c8e218b345e1697b90a1127092cd16562"} Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.645750 4664 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerStarted","Data":"3b225f5839b938ad83046ae3cb9e80fd7575f48b350685eccbf35cb3397d7a06"} Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.646077 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-central-agent" containerID="cri-o://0bb91f9917897cf91651c1da2ecd58245235daa1ea380b90176a6833222606d7" gracePeriod=30 Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.646199 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="proxy-httpd" containerID="cri-o://3b225f5839b938ad83046ae3cb9e80fd7575f48b350685eccbf35cb3397d7a06" gracePeriod=30 Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.646258 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="sg-core" containerID="cri-o://faa10799d7c2c8288cb88136ddf1958c8e218b345e1697b90a1127092cd16562" gracePeriod=30 Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.646313 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-notification-agent" containerID="cri-o://598f23a4ebaab0215f7b1e2aa79b3c861069ffd43514734524273f9368b3590e" gracePeriod=30 Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.646400 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:05:42 crc kubenswrapper[4664]: I1013 07:05:42.670295 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.564730419 podStartE2EDuration="6.670278899s" podCreationTimestamp="2025-10-13 07:05:36 +0000 UTC" firstStartedPulling="2025-10-13 07:05:37.62132547 +0000 UTC m=+1145.308770662" lastFinishedPulling="2025-10-13 07:05:41.72687395 +0000 UTC m=+1149.414319142" observedRunningTime="2025-10-13 07:05:42.668234673 +0000 UTC m=+1150.355679865" watchObservedRunningTime="2025-10-13 07:05:42.670278899 +0000 UTC m=+1150.357724091" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.370118 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.498283 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg2jx\" (UniqueName: \"kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx\") pod \"de4b5d93-3b16-4103-8688-3365fc6302a8\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.498534 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle\") pod \"de4b5d93-3b16-4103-8688-3365fc6302a8\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.499172 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data\") pod \"de4b5d93-3b16-4103-8688-3365fc6302a8\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.499243 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom\") pod \"de4b5d93-3b16-4103-8688-3365fc6302a8\" (UID: \"de4b5d93-3b16-4103-8688-3365fc6302a8\") " Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.506536 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx" (OuterVolumeSpecName: "kube-api-access-xg2jx") pod "de4b5d93-3b16-4103-8688-3365fc6302a8" (UID: "de4b5d93-3b16-4103-8688-3365fc6302a8"). InnerVolumeSpecName "kube-api-access-xg2jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.510090 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "de4b5d93-3b16-4103-8688-3365fc6302a8" (UID: "de4b5d93-3b16-4103-8688-3365fc6302a8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.572443 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data" (OuterVolumeSpecName: "config-data") pod "de4b5d93-3b16-4103-8688-3365fc6302a8" (UID: "de4b5d93-3b16-4103-8688-3365fc6302a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.583336 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de4b5d93-3b16-4103-8688-3365fc6302a8" (UID: "de4b5d93-3b16-4103-8688-3365fc6302a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.601368 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.601438 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.601455 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de4b5d93-3b16-4103-8688-3365fc6302a8-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.601467 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg2jx\" (UniqueName: \"kubernetes.io/projected/de4b5d93-3b16-4103-8688-3365fc6302a8-kube-api-access-xg2jx\") on node \"crc\" DevicePath \"\"" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.656159 4664 generic.go:334] "Generic (PLEG): container finished" podID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" exitCode=0 Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.656221 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6657f6fcdb-mmtx6" event={"ID":"de4b5d93-3b16-4103-8688-3365fc6302a8","Type":"ContainerDied","Data":"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6"} Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.656248 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6657f6fcdb-mmtx6" event={"ID":"de4b5d93-3b16-4103-8688-3365fc6302a8","Type":"ContainerDied","Data":"99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3"} Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.656263 4664 scope.go:117] "RemoveContainer" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.656377 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6657f6fcdb-mmtx6" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.667452 4664 generic.go:334] "Generic (PLEG): container finished" podID="386a209e-a350-45ad-855d-befdb86c0019" containerID="faa10799d7c2c8288cb88136ddf1958c8e218b345e1697b90a1127092cd16562" exitCode=2 Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.667498 4664 generic.go:334] "Generic (PLEG): container finished" podID="386a209e-a350-45ad-855d-befdb86c0019" containerID="598f23a4ebaab0215f7b1e2aa79b3c861069ffd43514734524273f9368b3590e" exitCode=0 Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.667553 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerDied","Data":"faa10799d7c2c8288cb88136ddf1958c8e218b345e1697b90a1127092cd16562"} Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.667579 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerDied","Data":"598f23a4ebaab0215f7b1e2aa79b3c861069ffd43514734524273f9368b3590e"} Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.688785 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"] Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.696869 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6657f6fcdb-mmtx6"] Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.703920 4664 scope.go:117] "RemoveContainer" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" Oct 13 07:05:43 crc kubenswrapper[4664]: E1013 07:05:43.704850 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6\": container with ID starting with 4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6 not found: ID does not exist" containerID="4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6" Oct 13 07:05:43 crc kubenswrapper[4664]: I1013 07:05:43.704881 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6"} err="failed to get container status \"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6\": rpc error: code = NotFound desc = could not find container \"4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6\": container with ID starting with 4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6 not found: ID does not exist" Oct 13 07:05:45 crc kubenswrapper[4664]: I1013 07:05:45.058342 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" path="/var/lib/kubelet/pods/de4b5d93-3b16-4103-8688-3365fc6302a8/volumes" Oct 13 07:05:47 crc kubenswrapper[4664]: I1013 07:05:47.740714 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:05:47 crc kubenswrapper[4664]: I1013 07:05:47.873751 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" 
podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:05:54 crc kubenswrapper[4664]: I1013 07:05:54.814757 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" event={"ID":"6d138f3a-fa9c-4ae4-873e-a8c335b0635d","Type":"ContainerStarted","Data":"b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee"} Oct 13 07:05:54 crc kubenswrapper[4664]: I1013 07:05:54.832090 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" podStartSLOduration=3.049218794 podStartE2EDuration="17.832072941s" podCreationTimestamp="2025-10-13 07:05:37 +0000 UTC" firstStartedPulling="2025-10-13 07:05:38.914543527 +0000 UTC m=+1146.601988719" lastFinishedPulling="2025-10-13 07:05:53.697397674 +0000 UTC m=+1161.384842866" observedRunningTime="2025-10-13 07:05:54.825668928 +0000 UTC m=+1162.513114130" watchObservedRunningTime="2025-10-13 07:05:54.832072941 +0000 UTC m=+1162.519518133" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.738765 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.739093 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.739863 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504"} pod="openstack/horizon-8487d6c5d4-cgnm9" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.740087 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" containerID="cri-o://952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504" gracePeriod=30 Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.871500 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.871586 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.872420 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5"} pod="openstack/horizon-7d78c558d-rjg4v" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:05:57 crc kubenswrapper[4664]: I1013 07:05:57.872467 4664 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" containerID="cri-o://3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5" gracePeriod=30 Oct 13 07:06:01 crc kubenswrapper[4664]: E1013 07:06:01.106094 4664 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/8f55a98af1b1f418518e5a60d3c8f4b562788a1752571af6e6d2547d0e73a61d/diff" to get inode usage: stat /var/lib/containers/storage/overlay/8f55a98af1b1f418518e5a60d3c8f4b562788a1752571af6e6d2547d0e73a61d/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_heat-engine-6657f6fcdb-mmtx6_de4b5d93-3b16-4103-8688-3365fc6302a8/heat-engine/0.log" to get inode usage: stat /var/log/pods/openstack_heat-engine-6657f6fcdb-mmtx6_de4b5d93-3b16-4103-8688-3365fc6302a8/heat-engine/0.log: no such file or directory Oct 13 07:06:06 crc kubenswrapper[4664]: I1013 07:06:06.948185 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Oct 13 07:06:07 crc kubenswrapper[4664]: I1013 07:06:07.935271 4664 generic.go:334] "Generic (PLEG): container finished" podID="6d138f3a-fa9c-4ae4-873e-a8c335b0635d" containerID="b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee" exitCode=0 Oct 13 07:06:07 crc kubenswrapper[4664]: I1013 07:06:07.935323 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" event={"ID":"6d138f3a-fa9c-4ae4-873e-a8c335b0635d","Type":"ContainerDied","Data":"b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee"} Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.461631 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.500057 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6mqt\" (UniqueName: \"kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt\") pod \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.500162 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data\") pod \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.500210 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle\") pod \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.500336 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts\") pod \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\" (UID: \"6d138f3a-fa9c-4ae4-873e-a8c335b0635d\") " Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.506130 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts" (OuterVolumeSpecName: "scripts") pod "6d138f3a-fa9c-4ae4-873e-a8c335b0635d" (UID: "6d138f3a-fa9c-4ae4-873e-a8c335b0635d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.526940 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt" (OuterVolumeSpecName: "kube-api-access-w6mqt") pod "6d138f3a-fa9c-4ae4-873e-a8c335b0635d" (UID: "6d138f3a-fa9c-4ae4-873e-a8c335b0635d"). InnerVolumeSpecName "kube-api-access-w6mqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.545589 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data" (OuterVolumeSpecName: "config-data") pod "6d138f3a-fa9c-4ae4-873e-a8c335b0635d" (UID: "6d138f3a-fa9c-4ae4-873e-a8c335b0635d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.550880 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d138f3a-fa9c-4ae4-873e-a8c335b0635d" (UID: "6d138f3a-fa9c-4ae4-873e-a8c335b0635d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.602575 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.602909 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6mqt\" (UniqueName: \"kubernetes.io/projected/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-kube-api-access-w6mqt\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.602924 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.602936 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d138f3a-fa9c-4ae4-873e-a8c335b0635d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.957358 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" event={"ID":"6d138f3a-fa9c-4ae4-873e-a8c335b0635d","Type":"ContainerDied","Data":"b833a939b7a7394ef50c3e72f02dd6f83918555cd29289070c996bc76f069f78"} Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.957640 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b833a939b7a7394ef50c3e72f02dd6f83918555cd29289070c996bc76f069f78" Oct 13 07:06:09 crc kubenswrapper[4664]: I1013 07:06:09.957453 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8bkjv" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.088554 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 13 07:06:10 crc kubenswrapper[4664]: E1013 07:06:10.089025 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.089048 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" Oct 13 07:06:10 crc kubenswrapper[4664]: E1013 07:06:10.089060 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d138f3a-fa9c-4ae4-873e-a8c335b0635d" containerName="nova-cell0-conductor-db-sync" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.089066 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d138f3a-fa9c-4ae4-873e-a8c335b0635d" containerName="nova-cell0-conductor-db-sync" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.089282 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d138f3a-fa9c-4ae4-873e-a8c335b0635d" containerName="nova-cell0-conductor-db-sync" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.089299 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="de4b5d93-3b16-4103-8688-3365fc6302a8" containerName="heat-engine" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.090133 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.095882 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-wc9bh" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.099980 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.172076 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.216738 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.216819 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.216842 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68k28\" (UniqueName: \"kubernetes.io/projected/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-kube-api-access-68k28\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.319045 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.319113 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.319134 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68k28\" (UniqueName: \"kubernetes.io/projected/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-kube-api-access-68k28\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.323179 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.331548 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.334263 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68k28\" (UniqueName: \"kubernetes.io/projected/1e95e8ed-be4d-4f4e-ad35-3e0314d294f7-kube-api-access-68k28\") pod \"nova-cell0-conductor-0\" (UID: \"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7\") " pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.423612 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:10 crc kubenswrapper[4664]: I1013 07:06:10.969693 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 13 07:06:11 crc kubenswrapper[4664]: I1013 07:06:11.975133 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7","Type":"ContainerStarted","Data":"3ffbe9240621afebff0d6d1d629002b3177c26d28a5c38be02740d1f137716d2"} Oct 13 07:06:11 crc kubenswrapper[4664]: I1013 07:06:11.975673 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1e95e8ed-be4d-4f4e-ad35-3e0314d294f7","Type":"ContainerStarted","Data":"fc8626898e5c679afab9ffa0f455e33045125fada4c7f683561dc65dcd39143c"} Oct 13 07:06:11 crc kubenswrapper[4664]: I1013 07:06:11.976930 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:12 crc kubenswrapper[4664]: I1013 07:06:12.004388 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.004364938 podStartE2EDuration="2.004364938s" podCreationTimestamp="2025-10-13 07:06:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:11.996539637 +0000 UTC m=+1179.683984849" watchObservedRunningTime="2025-10-13 07:06:12.004364938 +0000 UTC m=+1179.691810130" Oct 13 07:06:12 crc kubenswrapper[4664]: W1013 07:06:12.790115 4664 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d138f3a_fa9c_4ae4_873e_a8c335b0635d.slice/crio-conmon-b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d138f3a_fa9c_4ae4_873e_a8c335b0635d.slice/crio-conmon-b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee.scope: no such file or directory Oct 13 07:06:12 crc kubenswrapper[4664]: W1013 07:06:12.790174 4664 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d138f3a_fa9c_4ae4_873e_a8c335b0635d.slice/crio-b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d138f3a_fa9c_4ae4_873e_a8c335b0635d.slice/crio-b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee.scope: no such file or directory Oct 13 07:06:12 crc kubenswrapper[4664]: I1013 07:06:12.992097 4664 generic.go:334] "Generic (PLEG): container finished" podID="386a209e-a350-45ad-855d-befdb86c0019" 
containerID="3b225f5839b938ad83046ae3cb9e80fd7575f48b350685eccbf35cb3397d7a06" exitCode=137 Oct 13 07:06:12 crc kubenswrapper[4664]: I1013 07:06:12.992427 4664 generic.go:334] "Generic (PLEG): container finished" podID="386a209e-a350-45ad-855d-befdb86c0019" containerID="0bb91f9917897cf91651c1da2ecd58245235daa1ea380b90176a6833222606d7" exitCode=137 Oct 13 07:06:12 crc kubenswrapper[4664]: I1013 07:06:12.992159 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerDied","Data":"3b225f5839b938ad83046ae3cb9e80fd7575f48b350685eccbf35cb3397d7a06"} Oct 13 07:06:12 crc kubenswrapper[4664]: I1013 07:06:12.992538 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerDied","Data":"0bb91f9917897cf91651c1da2ecd58245235daa1ea380b90176a6833222606d7"} Oct 13 07:06:13 crc kubenswrapper[4664]: E1013 07:06:13.079065 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde4b5d93_3b16_4103_8688_3365fc6302a8.slice/crio-conmon-4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde4b5d93_3b16_4103_8688_3365fc6302a8.slice/crio-99739b443e10e4e60446579ec52458e6c8e65084aa3e2cbb599c5ed517f7aee3\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde4b5d93_3b16_4103_8688_3365fc6302a8.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde4b5d93_3b16_4103_8688_3365fc6302a8.slice/crio-4a637e359c87b83cf90c478861774a4cca776b94e6236f87b10b797163996bf6.scope\": RecentStats: unable to find data in memory cache]" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.322038 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.482402 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.482991 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.483197 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.483912 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.484186 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.484284 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.484444 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.484588 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zghx\" (UniqueName: \"kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx\") pod \"386a209e-a350-45ad-855d-befdb86c0019\" (UID: \"386a209e-a350-45ad-855d-befdb86c0019\") " Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.485241 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.485760 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.496905 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts" (OuterVolumeSpecName: "scripts") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.514940 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.518287 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx" (OuterVolumeSpecName: "kube-api-access-8zghx") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "kube-api-access-8zghx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.582577 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.588670 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.588711 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.588724 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/386a209e-a350-45ad-855d-befdb86c0019-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.588743 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zghx\" (UniqueName: \"kubernetes.io/projected/386a209e-a350-45ad-855d-befdb86c0019-kube-api-access-8zghx\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.588759 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.600063 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data" (OuterVolumeSpecName: "config-data") pod "386a209e-a350-45ad-855d-befdb86c0019" (UID: "386a209e-a350-45ad-855d-befdb86c0019"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:13 crc kubenswrapper[4664]: I1013 07:06:13.690751 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/386a209e-a350-45ad-855d-befdb86c0019-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.005029 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"386a209e-a350-45ad-855d-befdb86c0019","Type":"ContainerDied","Data":"58dd02c7e9a312f97d34bbb5ecdf288b49caf59b2ac3a614d1afa5c4927bfcb3"} Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.005108 4664 scope.go:117] "RemoveContainer" containerID="3b225f5839b938ad83046ae3cb9e80fd7575f48b350685eccbf35cb3397d7a06" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.005107 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.050974 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.059750 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.069428 4664 scope.go:117] "RemoveContainer" containerID="faa10799d7c2c8288cb88136ddf1958c8e218b345e1697b90a1127092cd16562" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.070180 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:14 crc kubenswrapper[4664]: E1013 07:06:14.070670 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-notification-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.070765 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-notification-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: E1013 07:06:14.070882 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="proxy-httpd" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.070968 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="proxy-httpd" Oct 13 07:06:14 crc kubenswrapper[4664]: E1013 07:06:14.071065 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="sg-core" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071133 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="sg-core" Oct 13 07:06:14 crc kubenswrapper[4664]: E1013 07:06:14.071199 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-central-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071255 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-central-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071472 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-central-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071554 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="proxy-httpd" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071622 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="ceilometer-notification-agent" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.071684 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="386a209e-a350-45ad-855d-befdb86c0019" containerName="sg-core" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.073566 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.078135 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.078544 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.090206 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.096377 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4d2j\" (UniqueName: \"kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.096619 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.096759 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.096853 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.096966 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.097092 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.097191 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.142397 4664 scope.go:117] "RemoveContainer" containerID="598f23a4ebaab0215f7b1e2aa79b3c861069ffd43514734524273f9368b3590e" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.168630 4664 scope.go:117] "RemoveContainer" containerID="0bb91f9917897cf91651c1da2ecd58245235daa1ea380b90176a6833222606d7" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.197889 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.197936 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.197989 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.198078 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.198127 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.198161 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4d2j\" (UniqueName: \"kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.198215 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.198716 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.199162 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.202341 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.203010 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.203840 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.216273 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.217095 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4d2j\" (UniqueName: \"kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j\") pod \"ceilometer-0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.403158 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:14 crc kubenswrapper[4664]: I1013 07:06:14.939609 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:15 crc kubenswrapper[4664]: I1013 07:06:15.017242 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerStarted","Data":"f427834a2b20268fea63fae46d22d6097f79afdef60ba5abcb546d162c2d20ad"} Oct 13 07:06:15 crc kubenswrapper[4664]: I1013 07:06:15.061304 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="386a209e-a350-45ad-855d-befdb86c0019" path="/var/lib/kubelet/pods/386a209e-a350-45ad-855d-befdb86c0019/volumes" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.065923 4664 generic.go:334] "Generic (PLEG): container finished" podID="19797988-adf9-40b6-8e07-f2e0869d3db6" containerID="fe68a7c4dc50ab4c938db733ab5b5b0842178279c84c675970150a2888ceb9c8" exitCode=137 Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.067065 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" event={"ID":"19797988-adf9-40b6-8e07-f2e0869d3db6","Type":"ContainerDied","Data":"fe68a7c4dc50ab4c938db733ab5b5b0842178279c84c675970150a2888ceb9c8"} Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.072078 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerStarted","Data":"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35"} Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.072119 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerStarted","Data":"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039"} Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.076105 4664 generic.go:334] "Generic (PLEG): container finished" podID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" containerID="eed63fb43c233bd43921992b315b134bb8656b2500af465e4d43e6791aeb4586" exitCode=137 Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.076148 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5758f76974-8cm64" event={"ID":"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723","Type":"ContainerDied","Data":"eed63fb43c233bd43921992b315b134bb8656b2500af465e4d43e6791aeb4586"} Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.076172 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5758f76974-8cm64" event={"ID":"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723","Type":"ContainerDied","Data":"303261dd0260b601d87c3e9aed0c2b1e7aeb2e16082f3242816e62e00ee66ce4"} Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.076182 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="303261dd0260b601d87c3e9aed0c2b1e7aeb2e16082f3242816e62e00ee66ce4" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.091577 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5758f76974-8cm64" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.145848 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle\") pod \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.146499 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5n8l\" (UniqueName: \"kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l\") pod \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.146723 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data\") pod \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.146744 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom\") pod \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\" (UID: \"3f3f1a5a-9692-4bd5-922e-9ec7bd47f723\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.164233 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" (UID: "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.191427 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l" (OuterVolumeSpecName: "kube-api-access-z5n8l") pod "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" (UID: "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723"). InnerVolumeSpecName "kube-api-access-z5n8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.220745 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" (UID: "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.236724 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data" (OuterVolumeSpecName: "config-data") pod "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" (UID: "3f3f1a5a-9692-4bd5-922e-9ec7bd47f723"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.250555 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.250611 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.250631 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.250646 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5n8l\" (UniqueName: \"kubernetes.io/projected/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723-kube-api-access-z5n8l\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.325015 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.351523 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kkkb\" (UniqueName: \"kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb\") pod \"19797988-adf9-40b6-8e07-f2e0869d3db6\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.351617 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data\") pod \"19797988-adf9-40b6-8e07-f2e0869d3db6\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.351670 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle\") pod \"19797988-adf9-40b6-8e07-f2e0869d3db6\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.351782 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom\") pod \"19797988-adf9-40b6-8e07-f2e0869d3db6\" (UID: \"19797988-adf9-40b6-8e07-f2e0869d3db6\") " Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.356526 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "19797988-adf9-40b6-8e07-f2e0869d3db6" (UID: "19797988-adf9-40b6-8e07-f2e0869d3db6"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.357144 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb" (OuterVolumeSpecName: "kube-api-access-6kkkb") pod "19797988-adf9-40b6-8e07-f2e0869d3db6" (UID: "19797988-adf9-40b6-8e07-f2e0869d3db6"). InnerVolumeSpecName "kube-api-access-6kkkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.410023 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19797988-adf9-40b6-8e07-f2e0869d3db6" (UID: "19797988-adf9-40b6-8e07-f2e0869d3db6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.427116 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data" (OuterVolumeSpecName: "config-data") pod "19797988-adf9-40b6-8e07-f2e0869d3db6" (UID: "19797988-adf9-40b6-8e07-f2e0869d3db6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.453722 4664 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.453761 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kkkb\" (UniqueName: \"kubernetes.io/projected/19797988-adf9-40b6-8e07-f2e0869d3db6-kube-api-access-6kkkb\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.453774 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.453782 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19797988-adf9-40b6-8e07-f2e0869d3db6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:16 crc kubenswrapper[4664]: I1013 07:06:16.933868 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.087881 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" event={"ID":"19797988-adf9-40b6-8e07-f2e0869d3db6","Type":"ContainerDied","Data":"6f61f1dbe0ff42a3082d90102dcb4ea2251f5c06da2f4ea58237ffb852d30b7b"} Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.089075 4664 scope.go:117] "RemoveContainer" containerID="fe68a7c4dc50ab4c938db733ab5b5b0842178279c84c675970150a2888ceb9c8" Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.089343 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f7d4d9596-4hkft" Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.096728 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5758f76974-8cm64" Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.097503 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerStarted","Data":"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5"} Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.121922 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"] Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.131159 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-6f7d4d9596-4hkft"] Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.157241 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5758f76974-8cm64"] Oct 13 07:06:17 crc kubenswrapper[4664]: I1013 07:06:17.173599 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5758f76974-8cm64"] Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.058312 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19797988-adf9-40b6-8e07-f2e0869d3db6" path="/var/lib/kubelet/pods/19797988-adf9-40b6-8e07-f2e0869d3db6/volumes" Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.060364 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" path="/var/lib/kubelet/pods/3f3f1a5a-9692-4bd5-922e-9ec7bd47f723/volumes" Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119157 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerStarted","Data":"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20"} Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119369 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119345 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-central-agent" containerID="cri-o://4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039" gracePeriod=30 Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119550 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="proxy-httpd" containerID="cri-o://be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20" gracePeriod=30 Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119624 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="sg-core" containerID="cri-o://0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5" gracePeriod=30 Oct 13 07:06:19 crc kubenswrapper[4664]: I1013 07:06:19.119693 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-notification-agent" containerID="cri-o://e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35" gracePeriod=30 Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130386 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" 
containerID="be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20" exitCode=0 Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130662 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerID="0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5" exitCode=2 Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130671 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerID="e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35" exitCode=0 Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130470 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerDied","Data":"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20"} Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130706 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerDied","Data":"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5"} Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.130721 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerDied","Data":"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35"} Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.452240 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Oct 13 07:06:20 crc kubenswrapper[4664]: I1013 07:06:20.481650 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.511553122 podStartE2EDuration="6.481631634s" podCreationTimestamp="2025-10-13 07:06:14 +0000 UTC" firstStartedPulling="2025-10-13 07:06:14.965365384 +0000 UTC m=+1182.652810566" lastFinishedPulling="2025-10-13 07:06:17.935443886 +0000 UTC m=+1185.622889078" observedRunningTime="2025-10-13 07:06:19.155352658 +0000 UTC m=+1186.842797860" watchObservedRunningTime="2025-10-13 07:06:20.481631634 +0000 UTC m=+1188.169076816" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.040021 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-2fnxr"] Oct 13 07:06:21 crc kubenswrapper[4664]: E1013 07:06:21.040821 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" containerName="heat-api" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.040902 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" containerName="heat-api" Oct 13 07:06:21 crc kubenswrapper[4664]: E1013 07:06:21.040985 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19797988-adf9-40b6-8e07-f2e0869d3db6" containerName="heat-cfnapi" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.041047 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="19797988-adf9-40b6-8e07-f2e0869d3db6" containerName="heat-cfnapi" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.041267 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="19797988-adf9-40b6-8e07-f2e0869d3db6" containerName="heat-cfnapi" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.041348 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3f3f1a5a-9692-4bd5-922e-9ec7bd47f723" containerName="heat-api" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.041991 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.044053 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.046089 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.090755 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2fnxr"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.146528 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.146584 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.146630 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.146975 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgq8l\" (UniqueName: \"kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.259435 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgq8l\" (UniqueName: \"kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.259557 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.259603 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " 
pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.259661 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.268099 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.268368 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.280458 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.282083 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.284480 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.298708 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.306242 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.329425 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.329670 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgq8l\" (UniqueName: \"kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l\") pod \"nova-cell0-cell-mapping-2fnxr\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.335029 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.345055 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361131 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361205 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj4mr\" (UniqueName: \"kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361244 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361281 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361410 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361475 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9wjl\" (UniqueName: \"kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.361505 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.396930 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.399508 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.454065 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.455360 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.461594 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463466 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463545 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9wjl\" (UniqueName: \"kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463575 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463604 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463651 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj4mr\" (UniqueName: \"kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463675 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.463707 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.477591 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.479555 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.481208 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs\") pod \"nova-api-0\" (UID: 
\"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.495184 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.502435 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.509397 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj4mr\" (UniqueName: \"kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr\") pod \"nova-cell1-novncproxy-0\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.518016 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.530080 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9wjl\" (UniqueName: \"kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl\") pod \"nova-api-0\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.565883 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-457fl\" (UniqueName: \"kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.565968 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.566028 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.580282 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.650550 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.652143 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.658010 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.661141 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.676826 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.676917 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.677002 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-457fl\" (UniqueName: \"kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.697693 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.701231 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.715574 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-457fl\" (UniqueName: \"kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl\") pod \"nova-scheduler-0\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.780203 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.786644 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.786779 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.786930 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwh2t\" (UniqueName: \"kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.831599 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.856350 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.857837 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.891928 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.892020 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.892036 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwh2t\" (UniqueName: \"kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.892051 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.893247 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.897163 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.910837 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.912244 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.912709 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.922601 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwh2t\" (UniqueName: \"kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t\") pod \"nova-metadata-0\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.993565 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994250 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr4pc\" (UniqueName: \"kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994307 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994356 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994407 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994473 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:21 crc kubenswrapper[4664]: I1013 07:06:21.994492 
4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096759 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096806 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096847 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr4pc\" (UniqueName: \"kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096871 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096916 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.096960 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.098031 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.098093 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.102490 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.103504 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.103743 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.117765 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr4pc\" (UniqueName: \"kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc\") pod \"dnsmasq-dns-69fd679865-wq8lz\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.213330 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.265735 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2fnxr"] Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.451361 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:06:22 crc kubenswrapper[4664]: W1013 07:06:22.465605 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode74a1282_6d8d_4fdd_ab30_b4ba72ddc819.slice/crio-0dca3a32c76e3e4e9d87f615ed196f6647dc214730244e3c1d3644d608f7a98c WatchSource:0}: Error finding container 0dca3a32c76e3e4e9d87f615ed196f6647dc214730244e3c1d3644d608f7a98c: Status 404 returned error can't find the container with id 0dca3a32c76e3e4e9d87f615ed196f6647dc214730244e3c1d3644d608f7a98c Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.653223 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.752922 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:06:22 crc kubenswrapper[4664]: W1013 07:06:22.766772 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6f2ac68_c742_4ace_8ead_35c43f62c679.slice/crio-dafe26fe9e25c4c719348834e535351b9e98b5d5f7a114076124a7091e4c3def WatchSource:0}: Error finding container dafe26fe9e25c4c719348834e535351b9e98b5d5f7a114076124a7091e4c3def: Status 404 returned error can't find the container with id dafe26fe9e25c4c719348834e535351b9e98b5d5f7a114076124a7091e4c3def Oct 13 07:06:22 crc kubenswrapper[4664]: I1013 07:06:22.904785 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:22 crc kubenswrapper[4664]: W1013 07:06:22.954762 4664 manager.go:1169] Failed to process 
watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod586d3efb_8e4c_4c45_b7ee_cc493554df2a.slice/crio-90eaf66a635759491b5bc2839b6960669d7dad12d5e438080a58ed09487a6f3b WatchSource:0}: Error finding container 90eaf66a635759491b5bc2839b6960669d7dad12d5e438080a58ed09487a6f3b: Status 404 returned error can't find the container with id 90eaf66a635759491b5bc2839b6960669d7dad12d5e438080a58ed09487a6f3b Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.087962 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.227033 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerStarted","Data":"dafe26fe9e25c4c719348834e535351b9e98b5d5f7a114076124a7091e4c3def"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.240199 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerStarted","Data":"90eaf66a635759491b5bc2839b6960669d7dad12d5e438080a58ed09487a6f3b"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.267388 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819","Type":"ContainerStarted","Data":"0dca3a32c76e3e4e9d87f615ed196f6647dc214730244e3c1d3644d608f7a98c"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.284619 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2fnxr" event={"ID":"b2b63677-e86e-4de3-8091-1c36dc8fab29","Type":"ContainerStarted","Data":"1263f9aab96e6a4f8c058ebe5805cd59868dea520ffee7fc4ac711c665860314"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.284664 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2fnxr" event={"ID":"b2b63677-e86e-4de3-8091-1c36dc8fab29","Type":"ContainerStarted","Data":"4786ecafecf35cc6b444adb54e2c7b8653e13be8ed8a024a4296e97ea59e7b56"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.302678 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f088ee30-e602-4c3d-b212-da3436e29c67","Type":"ContainerStarted","Data":"7a718f62398a2712f9af33167a6c087ab906ec9120f8dea82f9d937255c09b0e"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.309744 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" event={"ID":"9cb0e2b8-6d81-49b4-850e-8aa72b02c626","Type":"ContainerStarted","Data":"d997cdb1f495af536868f698a4be448a1ab9dd853235ad712d354b5c487c62eb"} Oct 13 07:06:23 crc kubenswrapper[4664]: I1013 07:06:23.390363 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-2fnxr" podStartSLOduration=2.390344994 podStartE2EDuration="2.390344994s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:23.389043609 +0000 UTC m=+1191.076488801" watchObservedRunningTime="2025-10-13 07:06:23.390344994 +0000 UTC m=+1191.077790186" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.327654 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-q9n4w"] Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 
07:06:24.332602 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.336750 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.336963 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.341502 4664 generic.go:334] "Generic (PLEG): container finished" podID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerID="1e1df26a5e3f60fa964d972da6732ffebf705f2141f9aaa6cb9954b83d6a2069" exitCode=0 Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.341613 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" event={"ID":"9cb0e2b8-6d81-49b4-850e-8aa72b02c626","Type":"ContainerDied","Data":"1e1df26a5e3f60fa964d972da6732ffebf705f2141f9aaa6cb9954b83d6a2069"} Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.347406 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-q9n4w"] Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.377813 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f52l7\" (UniqueName: \"kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.378052 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.378354 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.379001 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.480350 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.480453 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.480486 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f52l7\" (UniqueName: \"kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.480506 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.488235 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.495614 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.496011 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.521463 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f52l7\" (UniqueName: \"kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7\") pod \"nova-cell1-conductor-db-sync-q9n4w\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") " pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:24 crc kubenswrapper[4664]: I1013 07:06:24.611213 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:25 crc kubenswrapper[4664]: I1013 07:06:25.245833 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-q9n4w"] Oct 13 07:06:25 crc kubenswrapper[4664]: I1013 07:06:25.358451 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" event={"ID":"9cb0e2b8-6d81-49b4-850e-8aa72b02c626","Type":"ContainerStarted","Data":"86fca4589d305b331ec63e8d7d414f93c8a8bcfff146c29aa2cf5a69eb3be901"} Oct 13 07:06:25 crc kubenswrapper[4664]: I1013 07:06:25.359819 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:25 crc kubenswrapper[4664]: I1013 07:06:25.396564 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" podStartSLOduration=4.396541185 podStartE2EDuration="4.396541185s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:25.380601155 +0000 UTC m=+1193.068046357" watchObservedRunningTime="2025-10-13 07:06:25.396541185 +0000 UTC m=+1193.083986377" Oct 13 07:06:26 crc kubenswrapper[4664]: W1013 07:06:26.019286 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3061d9_9611_4c41_af2e_5c978dc8032c.slice/crio-c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0 WatchSource:0}: Error finding container c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0: Status 404 returned error can't find the container with id c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0 Oct 13 07:06:26 crc kubenswrapper[4664]: I1013 07:06:26.370138 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" event={"ID":"7b3061d9-9611-4c41-af2e-5c978dc8032c","Type":"ContainerStarted","Data":"c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0"} Oct 13 07:06:26 crc kubenswrapper[4664]: I1013 07:06:26.395071 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:26 crc kubenswrapper[4664]: I1013 07:06:26.455140 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.392828 4664 generic.go:334] "Generic (PLEG): container finished" podID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerID="952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504" exitCode=137 Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.393007 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504"} Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.393299 4664 scope.go:117] "RemoveContainer" containerID="edb368fb2b90a86f8b5cf8dbcdbe841a8c7ef1178d9d56a64c1acfb2e63d34c0" Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.397364 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" event={"ID":"7b3061d9-9611-4c41-af2e-5c978dc8032c","Type":"ContainerStarted","Data":"528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6"} Oct 13 
07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.408123 4664 generic.go:334] "Generic (PLEG): container finished" podID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerID="3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5" exitCode=137 Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.408169 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerDied","Data":"3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5"} Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.408201 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"716b360bd426e69bd996488b830583fb95239ac8059eb566e7ac7b80470cc331"} Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.418921 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" podStartSLOduration=4.418903643 podStartE2EDuration="4.418903643s" podCreationTimestamp="2025-10-13 07:06:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:28.414389262 +0000 UTC m=+1196.101834454" watchObservedRunningTime="2025-10-13 07:06:28.418903643 +0000 UTC m=+1196.106348835" Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.671360 4664 scope.go:117] "RemoveContainer" containerID="6dd7403fc2ce851531d7d9fd3780f8fa218eb32de5a0368ce4f3d96e46b9049b" Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.812490 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:06:28 crc kubenswrapper[4664]: I1013 07:06:28.812840 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.270693 4664 util.go:48] "No ready sandbox for pod can be found. 
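
The paired patch_prober/prober records above are a failed HTTP liveness probe: the GET to http://127.0.0.1:8798/health was refused outright because nothing was listening, so the kubelet marks the probe failed and will restart the container once the failure threshold is crossed. Functionally, an httpGet probe reduces to the sketch below; the one-second timeout is an assumption (the real value comes from the pod spec), and the kubelet counts any status below 400 as success:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe returns nil when the endpoint answers with a status below 400,
// mirroring kubelet's success criterion for httpGet probes.
func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as in the record above
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health", 1*time.Second); err != nil {
		fmt.Println("Liveness probe failed:", err)
	}
}
```
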
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.370889 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.370945 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.371405 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.371562 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.371656 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.371750 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.371806 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4d2j\" (UniqueName: \"kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j\") pod \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\" (UID: \"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0\") " Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.372845 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.373233 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.376122 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j" (OuterVolumeSpecName: "kube-api-access-r4d2j") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "kube-api-access-r4d2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.383936 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts" (OuterVolumeSpecName: "scripts") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.408158 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.431320 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f088ee30-e602-4c3d-b212-da3436e29c67","Type":"ContainerStarted","Data":"beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.434884 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.453847 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerStarted","Data":"6815bd9a0a01d33358499fe3caf5812c84146ba0b0b503742730fcd74f10a4b3"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.460976 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerStarted","Data":"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.466489 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.137112009 podStartE2EDuration="8.466465076s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="2025-10-13 07:06:22.683535185 +0000 UTC m=+1190.370980377" lastFinishedPulling="2025-10-13 07:06:28.012888252 +0000 UTC m=+1195.700333444" observedRunningTime="2025-10-13 07:06:29.452187822 +0000 UTC m=+1197.139633014" watchObservedRunningTime="2025-10-13 07:06:29.466465076 +0000 UTC m=+1197.153910268" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.469711 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerID="4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039" exitCode=0 Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.469875 4664 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.469901 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerDied","Data":"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.469942 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0","Type":"ContainerDied","Data":"f427834a2b20268fea63fae46d22d6097f79afdef60ba5abcb546d162c2d20ad"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.469957 4664 scope.go:117] "RemoveContainer" containerID="be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.473841 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.473866 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.473876 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.473899 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.473911 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4d2j\" (UniqueName: \"kubernetes.io/projected/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-kube-api-access-r4d2j\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.477812 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca" gracePeriod=30 Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.479181 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819","Type":"ContainerStarted","Data":"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca"} Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.502406 4664 scope.go:117] "RemoveContainer" containerID="0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.519242 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.983943304 podStartE2EDuration="8.519220376s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="2025-10-13 07:06:22.475578376 +0000 UTC m=+1190.163023568" lastFinishedPulling="2025-10-13 07:06:28.010855448 +0000 UTC m=+1195.698300640" observedRunningTime="2025-10-13 07:06:29.515302001 +0000 
UTC m=+1197.202747203" watchObservedRunningTime="2025-10-13 07:06:29.519220376 +0000 UTC m=+1197.206665568" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.520964 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.539697 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data" (OuterVolumeSpecName: "config-data") pod "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" (UID: "a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.542054 4664 scope.go:117] "RemoveContainer" containerID="e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.575499 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.575775 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.577683 4664 scope.go:117] "RemoveContainer" containerID="4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.607742 4664 scope.go:117] "RemoveContainer" containerID="be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.608359 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20\": container with ID starting with be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20 not found: ID does not exist" containerID="be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.608434 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20"} err="failed to get container status \"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20\": rpc error: code = NotFound desc = could not find container \"be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20\": container with ID starting with be0df3e644d13e0e2bb637d981f35a3b356346e6e39813388ce2ff23f37cfc20 not found: ID does not exist" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.608465 4664 scope.go:117] "RemoveContainer" containerID="0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.609279 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5\": 
container with ID starting with 0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5 not found: ID does not exist" containerID="0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.609326 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5"} err="failed to get container status \"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5\": rpc error: code = NotFound desc = could not find container \"0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5\": container with ID starting with 0762eaea21fc840028fdf72ec8522e5de180d64ec3ab1b945b76405c739d79a5 not found: ID does not exist" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.609346 4664 scope.go:117] "RemoveContainer" containerID="e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.611587 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35\": container with ID starting with e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35 not found: ID does not exist" containerID="e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.611639 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35"} err="failed to get container status \"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35\": rpc error: code = NotFound desc = could not find container \"e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35\": container with ID starting with e8936959c865bb483375be87f78ae97f37fc68780757b927c9fd7fe3175c3f35 not found: ID does not exist" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.611716 4664 scope.go:117] "RemoveContainer" containerID="4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.612257 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039\": container with ID starting with 4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039 not found: ID does not exist" containerID="4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.612300 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039"} err="failed to get container status \"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039\": rpc error: code = NotFound desc = could not find container \"4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039\": container with ID starting with 4c5b373d2c39025dfa6a2c5a797185e4bd84ca1c61e802bfe3a12cf8e76c4039 not found: ID does not exist" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.813262 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.820442 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
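
The NotFound errors above are the benign tail of container removal: RemoveContainer deletes each old ceilometer container, a follow-up ContainerStatus query races with that deletion, and the runtime answers rpc NotFound, which the kubelet logs and moves past. When triaging a log like this one, such records can be separated from genuinely actionable runtime errors; a heuristic filter sketch (my own classification, not a kubelet facility):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// benign reports whether a runtime error record matches the known
// post-removal race: ContainerStatus on an already-deleted container.
func benign(line string) bool {
	return strings.Contains(line, "ContainerStatus from runtime service failed") &&
		strings.Contains(line, "code = NotFound")
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, "rpc error") {
			continue
		}
		if benign(line) {
			continue // expected during RemoveContainer cleanup
		}
		fmt.Println("investigate:", line)
	}
}
```
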
pods=["openstack/ceilometer-0"] Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.842053 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.844271 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-notification-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.844405 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-notification-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.844509 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="sg-core" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.844577 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="sg-core" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.844652 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-central-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.844725 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-central-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: E1013 07:06:29.844785 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="proxy-httpd" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.844856 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="proxy-httpd" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.845128 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-notification-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.845213 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="proxy-httpd" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.845289 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="ceilometer-central-agent" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.845372 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" containerName="sg-core" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.847399 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.855131 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.855723 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.901353 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982170 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9vbn\" (UniqueName: \"kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982248 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982280 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982321 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982337 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982354 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:29 crc kubenswrapper[4664]: I1013 07:06:29.982387 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084025 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9vbn\" (UniqueName: \"kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: 
I1013 07:06:30.084111 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084141 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084220 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084236 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084254 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.084305 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.086162 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.086261 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.091630 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.092447 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.092895 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.105257 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.113414 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9vbn\" (UniqueName: \"kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn\") pod \"ceilometer-0\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.173293 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.535024 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerStarted","Data":"e746bdb8eb48e1b97bc8ed090f6da2cfd5c9753cb2edfa61ad086b02261dd37e"} Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.569612 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.345425789 podStartE2EDuration="9.569594465s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="2025-10-13 07:06:22.787677988 +0000 UTC m=+1190.475123180" lastFinishedPulling="2025-10-13 07:06:28.011846664 +0000 UTC m=+1195.699291856" observedRunningTime="2025-10-13 07:06:30.562446242 +0000 UTC m=+1198.249891434" watchObservedRunningTime="2025-10-13 07:06:30.569594465 +0000 UTC m=+1198.257039657" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.595211 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerStarted","Data":"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508"} Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.595425 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-log" containerID="cri-o://ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" gracePeriod=30 Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.595990 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-metadata" containerID="cri-o://c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" gracePeriod=30 Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.635284 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.465705277 podStartE2EDuration="9.635261983s" podCreationTimestamp="2025-10-13 07:06:21 +0000 UTC" firstStartedPulling="2025-10-13 07:06:22.969969386 +0000 UTC m=+1190.657414588" lastFinishedPulling="2025-10-13 07:06:28.139526102 +0000 UTC m=+1195.826971294" observedRunningTime="2025-10-13 07:06:30.629024105 +0000 UTC m=+1198.316469307" 
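
The pod_startup_latency_tracker record above makes the relationship between its two durations explicit: podStartSLOduration excludes the image-pull window. For nova-metadata-0, 9.635261983s (E2E) minus the pull window (07:06:28.139526102 - 07:06:22.969969386 = 5.169556716s) gives 4.465705267s, matching the reported podStartSLOduration=4.465705277 to within about 10ns of rounding; the nova-scheduler-0 record earlier satisfies the same identity exactly. A check in Go, with the timestamps copied from the record above:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	first, _ := time.Parse(layout, "2025-10-13 07:06:22.969969386 +0000 UTC") // firstStartedPulling
	last, _ := time.Parse(layout, "2025-10-13 07:06:28.139526102 +0000 UTC")  // lastFinishedPulling

	e2e := 9.635261983 * float64(time.Second) // podStartE2EDuration
	pull := float64(last.Sub(first))          // image pull window, 5.169556716s

	slo := time.Duration(e2e - pull)
	fmt.Println("podStartSLOduration ≈", slo) // ≈ 4.465705267s, matching the log
}
```
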
watchObservedRunningTime="2025-10-13 07:06:30.635261983 +0000 UTC m=+1198.322707175" Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.741047 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:06:30 crc kubenswrapper[4664]: I1013 07:06:30.791572 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.061115 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0" path="/var/lib/kubelet/pods/a8bf5a9d-cc6a-468f-89d3-1f355a9bf7e0/volumes" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.358223 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.428234 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data\") pod \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.428372 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwh2t\" (UniqueName: \"kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t\") pod \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.428460 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle\") pod \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.428485 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs\") pod \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\" (UID: \"586d3efb-8e4c-4c45-b7ee-cc493554df2a\") " Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.436276 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs" (OuterVolumeSpecName: "logs") pod "586d3efb-8e4c-4c45-b7ee-cc493554df2a" (UID: "586d3efb-8e4c-4c45-b7ee-cc493554df2a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.456608 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t" (OuterVolumeSpecName: "kube-api-access-cwh2t") pod "586d3efb-8e4c-4c45-b7ee-cc493554df2a" (UID: "586d3efb-8e4c-4c45-b7ee-cc493554df2a"). InnerVolumeSpecName "kube-api-access-cwh2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.497048 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data" (OuterVolumeSpecName: "config-data") pod "586d3efb-8e4c-4c45-b7ee-cc493554df2a" (UID: "586d3efb-8e4c-4c45-b7ee-cc493554df2a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.515518 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "586d3efb-8e4c-4c45-b7ee-cc493554df2a" (UID: "586d3efb-8e4c-4c45-b7ee-cc493554df2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.531000 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.531033 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwh2t\" (UniqueName: \"kubernetes.io/projected/586d3efb-8e4c-4c45-b7ee-cc493554df2a-kube-api-access-cwh2t\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.531044 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586d3efb-8e4c-4c45-b7ee-cc493554df2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.531054 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/586d3efb-8e4c-4c45-b7ee-cc493554df2a-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.576405 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.621749 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerStarted","Data":"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a"} Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.621886 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerStarted","Data":"61a0b931c22097f8d9c49c836c560971e82a9641b9dda1871802f3a1cf55298b"} Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.623746 4664 generic.go:334] "Generic (PLEG): container finished" podID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerID="c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" exitCode=0 Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.623772 4664 generic.go:334] "Generic (PLEG): container finished" podID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerID="ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" exitCode=143 Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.624721 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.627867 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerDied","Data":"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508"} Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.627918 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerDied","Data":"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63"} Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.627930 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"586d3efb-8e4c-4c45-b7ee-cc493554df2a","Type":"ContainerDied","Data":"90eaf66a635759491b5bc2839b6960669d7dad12d5e438080a58ed09487a6f3b"} Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.627948 4664 scope.go:117] "RemoveContainer" containerID="c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.658338 4664 scope.go:117] "RemoveContainer" containerID="ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.675849 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.684065 4664 scope.go:117] "RemoveContainer" containerID="c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" Oct 13 07:06:31 crc kubenswrapper[4664]: E1013 07:06:31.686328 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508\": container with ID starting with c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508 not found: ID does not exist" containerID="c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.686475 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508"} err="failed to get container status \"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508\": rpc error: code = NotFound desc = could not find container \"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508\": container with ID starting with c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508 not found: ID does not exist" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.686561 4664 scope.go:117] "RemoveContainer" containerID="ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" Oct 13 07:06:31 crc kubenswrapper[4664]: E1013 07:06:31.688682 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63\": container with ID starting with ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63 not found: ID does not exist" containerID="ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.688774 4664 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63"} err="failed to get container status \"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63\": rpc error: code = NotFound desc = could not find container \"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63\": container with ID starting with ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63 not found: ID does not exist" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.688874 4664 scope.go:117] "RemoveContainer" containerID="c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.689902 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508"} err="failed to get container status \"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508\": rpc error: code = NotFound desc = could not find container \"c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508\": container with ID starting with c8b4b7510282419dabc78c1e82e9684258a3b1cca18da3fbce2b032f0402d508 not found: ID does not exist" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.690005 4664 scope.go:117] "RemoveContainer" containerID="ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.690406 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63"} err="failed to get container status \"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63\": rpc error: code = NotFound desc = could not find container \"ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63\": container with ID starting with ca7d9980cd6d5474027df87b4a09cb718d0d49dce7702b02283aa13a66947a63 not found: ID does not exist" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.693351 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.711309 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:31 crc kubenswrapper[4664]: E1013 07:06:31.712266 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-metadata" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.712346 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-metadata" Oct 13 07:06:31 crc kubenswrapper[4664]: E1013 07:06:31.712411 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-log" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.712466 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-log" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.712714 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-log" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.712808 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" containerName="nova-metadata-metadata" Oct 13 07:06:31 crc 
kubenswrapper[4664]: I1013 07:06:31.713899 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.716480 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.719257 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.723287 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.836431 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.836472 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.839116 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.839150 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.839222 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.839446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdxwk\" (UniqueName: \"kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.839531 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.898036 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.898307 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.942017 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " 
pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.942080 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.942197 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.942267 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdxwk\" (UniqueName: \"kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.942308 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.944013 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.949348 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.951147 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.951473 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.967574 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdxwk\" (UniqueName: \"kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk\") pod \"nova-metadata-0\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " pod="openstack/nova-metadata-0" Oct 13 07:06:31 crc kubenswrapper[4664]: I1013 07:06:31.968207 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.061188 4664 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.214716 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.313353 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"] Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.316020 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="dnsmasq-dns" containerID="cri-o://7120fe422ce19c2f83735f21bb721fb0dc8fc471ff51697fa259180ba2fe25e1" gracePeriod=10 Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.663778 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerStarted","Data":"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939"} Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.676561 4664 generic.go:334] "Generic (PLEG): container finished" podID="99998d93-3c3d-4269-96b4-260b38f59814" containerID="7120fe422ce19c2f83735f21bb721fb0dc8fc471ff51697fa259180ba2fe25e1" exitCode=0 Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.677490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" event={"ID":"99998d93-3c3d-4269-96b4-260b38f59814","Type":"ContainerDied","Data":"7120fe422ce19c2f83735f21bb721fb0dc8fc471ff51697fa259180ba2fe25e1"} Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.707866 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.794419 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.926682 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:06:32 crc kubenswrapper[4664]: I1013 07:06:32.926873 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.105149 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="586d3efb-8e4c-4c45-b7ee-cc493554df2a" path="/var/lib/kubelet/pods/586d3efb-8e4c-4c45-b7ee-cc493554df2a/volumes" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.374875 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384175 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384233 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384252 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56tw9\" (UniqueName: \"kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384276 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384323 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.384400 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc\") pod \"99998d93-3c3d-4269-96b4-260b38f59814\" (UID: \"99998d93-3c3d-4269-96b4-260b38f59814\") " Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.428975 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9" (OuterVolumeSpecName: "kube-api-access-56tw9") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "kube-api-access-56tw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.491966 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56tw9\" (UniqueName: \"kubernetes.io/projected/99998d93-3c3d-4269-96b4-260b38f59814-kube-api-access-56tw9\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.622731 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.662000 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.696560 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.696586 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.724222 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" event={"ID":"99998d93-3c3d-4269-96b4-260b38f59814","Type":"ContainerDied","Data":"53f862947320f9c575b1dbc672472ee5965db5e4150919d5365672258c93d324"} Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.724287 4664 scope.go:117] "RemoveContainer" containerID="7120fe422ce19c2f83735f21bb721fb0dc8fc471ff51697fa259180ba2fe25e1" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.724462 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864486cfc5-dvcpd" Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.744398 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerStarted","Data":"8d90f1e667aaed57651a58cbbc1974a395b4f94384c883dbed9e803051409a31"} Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.744468 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerStarted","Data":"04ec473bbd078497ef3c31673328c7a148c8f126e8997bf085925cf35836cfce"} Oct 13 07:06:33 crc kubenswrapper[4664]: I1013 07:06:33.963118 4664 scope.go:117] "RemoveContainer" containerID="104777c2a029e611defe288496f6f0ee2ce93c302888d0828c5589e0589523ec" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.006810 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.018453 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.023265 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config" (OuterVolumeSpecName: "config") pod "99998d93-3c3d-4269-96b4-260b38f59814" (UID: "99998d93-3c3d-4269-96b4-260b38f59814"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.108456 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.108491 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.108505 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99998d93-3c3d-4269-96b4-260b38f59814-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.158926 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"] Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.178501 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-864486cfc5-dvcpd"] Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.755724 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerStarted","Data":"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce"} Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.757786 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerStarted","Data":"95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4"} Oct 13 07:06:34 crc kubenswrapper[4664]: I1013 07:06:34.779341 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.77932274 podStartE2EDuration="3.77932274s" podCreationTimestamp="2025-10-13 07:06:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:34.774977423 +0000 UTC m=+1202.462422615" watchObservedRunningTime="2025-10-13 07:06:34.77932274 +0000 UTC m=+1202.466767932" Oct 13 07:06:35 crc kubenswrapper[4664]: I1013 07:06:35.059275 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99998d93-3c3d-4269-96b4-260b38f59814" path="/var/lib/kubelet/pods/99998d93-3c3d-4269-96b4-260b38f59814/volumes" Oct 13 07:06:36 crc kubenswrapper[4664]: I1013 07:06:36.776963 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerStarted","Data":"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2"} Oct 13 07:06:36 crc kubenswrapper[4664]: I1013 07:06:36.777443 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:06:36 crc kubenswrapper[4664]: I1013 07:06:36.823122 4664 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.787502935 podStartE2EDuration="7.823104494s" podCreationTimestamp="2025-10-13 07:06:29 +0000 UTC" firstStartedPulling="2025-10-13 07:06:30.791315235 +0000 UTC m=+1198.478760427" lastFinishedPulling="2025-10-13 07:06:35.826916794 +0000 UTC m=+1203.514361986" observedRunningTime="2025-10-13 07:06:36.815444307 +0000 UTC m=+1204.502889509" watchObservedRunningTime="2025-10-13 07:06:36.823104494 +0000 UTC m=+1204.510549686" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.062241 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.062288 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.738540 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.738608 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.871412 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:06:37 crc kubenswrapper[4664]: I1013 07:06:37.872459 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:06:38 crc kubenswrapper[4664]: I1013 07:06:38.798439 4664 generic.go:334] "Generic (PLEG): container finished" podID="b2b63677-e86e-4de3-8091-1c36dc8fab29" containerID="1263f9aab96e6a4f8c058ebe5805cd59868dea520ffee7fc4ac711c665860314" exitCode=0 Oct 13 07:06:38 crc kubenswrapper[4664]: I1013 07:06:38.798526 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2fnxr" event={"ID":"b2b63677-e86e-4de3-8091-1c36dc8fab29","Type":"ContainerDied","Data":"1263f9aab96e6a4f8c058ebe5805cd59868dea520ffee7fc4ac711c665860314"} Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.331247 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.465380 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgq8l\" (UniqueName: \"kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l\") pod \"b2b63677-e86e-4de3-8091-1c36dc8fab29\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.465548 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts\") pod \"b2b63677-e86e-4de3-8091-1c36dc8fab29\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.465627 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle\") pod \"b2b63677-e86e-4de3-8091-1c36dc8fab29\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.465755 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data\") pod \"b2b63677-e86e-4de3-8091-1c36dc8fab29\" (UID: \"b2b63677-e86e-4de3-8091-1c36dc8fab29\") " Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.471015 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts" (OuterVolumeSpecName: "scripts") pod "b2b63677-e86e-4de3-8091-1c36dc8fab29" (UID: "b2b63677-e86e-4de3-8091-1c36dc8fab29"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.471512 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l" (OuterVolumeSpecName: "kube-api-access-tgq8l") pod "b2b63677-e86e-4de3-8091-1c36dc8fab29" (UID: "b2b63677-e86e-4de3-8091-1c36dc8fab29"). InnerVolumeSpecName "kube-api-access-tgq8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.497940 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data" (OuterVolumeSpecName: "config-data") pod "b2b63677-e86e-4de3-8091-1c36dc8fab29" (UID: "b2b63677-e86e-4de3-8091-1c36dc8fab29"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.505580 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2b63677-e86e-4de3-8091-1c36dc8fab29" (UID: "b2b63677-e86e-4de3-8091-1c36dc8fab29"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.567994 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.568038 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.568047 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgq8l\" (UniqueName: \"kubernetes.io/projected/b2b63677-e86e-4de3-8091-1c36dc8fab29-kube-api-access-tgq8l\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.568057 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b63677-e86e-4de3-8091-1c36dc8fab29-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.820080 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2fnxr" event={"ID":"b2b63677-e86e-4de3-8091-1c36dc8fab29","Type":"ContainerDied","Data":"4786ecafecf35cc6b444adb54e2c7b8653e13be8ed8a024a4296e97ea59e7b56"} Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.820354 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4786ecafecf35cc6b444adb54e2c7b8653e13be8ed8a024a4296e97ea59e7b56" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.820133 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2fnxr" Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.970187 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.970496 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-api" containerID="cri-o://e746bdb8eb48e1b97bc8ed090f6da2cfd5c9753cb2edfa61ad086b02261dd37e" gracePeriod=30 Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.971577 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-log" containerID="cri-o://6815bd9a0a01d33358499fe3caf5812c84146ba0b0b503742730fcd74f10a4b3" gracePeriod=30 Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.989066 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:06:40 crc kubenswrapper[4664]: I1013 07:06:40.989294 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" containerName="nova-scheduler-scheduler" containerID="cri-o://beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" gracePeriod=30 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.025312 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.025552 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" 
containerName="nova-metadata-log" containerID="cri-o://8d90f1e667aaed57651a58cbbc1974a395b4f94384c883dbed9e803051409a31" gracePeriod=30 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.028305 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-metadata" containerID="cri-o://95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4" gracePeriod=30 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.832876 4664 generic.go:334] "Generic (PLEG): container finished" podID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerID="95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4" exitCode=0 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.832912 4664 generic.go:334] "Generic (PLEG): container finished" podID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerID="8d90f1e667aaed57651a58cbbc1974a395b4f94384c883dbed9e803051409a31" exitCode=143 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.832957 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerDied","Data":"95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4"} Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.832988 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerDied","Data":"8d90f1e667aaed57651a58cbbc1974a395b4f94384c883dbed9e803051409a31"} Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.835739 4664 generic.go:334] "Generic (PLEG): container finished" podID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerID="6815bd9a0a01d33358499fe3caf5812c84146ba0b0b503742730fcd74f10a4b3" exitCode=143 Oct 13 07:06:41 crc kubenswrapper[4664]: I1013 07:06:41.835774 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerDied","Data":"6815bd9a0a01d33358499fe3caf5812c84146ba0b0b503742730fcd74f10a4b3"} Oct 13 07:06:41 crc kubenswrapper[4664]: E1013 07:06:41.918554 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:06:41 crc kubenswrapper[4664]: E1013 07:06:41.967183 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:06:41 crc kubenswrapper[4664]: E1013 07:06:41.982330 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:06:41 crc kubenswrapper[4664]: E1013 07:06:41.982446 4664 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: 
, exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" containerName="nova-scheduler-scheduler" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.138550 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227418 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs\") pod \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227533 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs\") pod \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227621 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdxwk\" (UniqueName: \"kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk\") pod \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227655 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data\") pod \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227681 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle\") pod \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\" (UID: \"717ad5f1-331c-4e8c-93e3-04305b2c2a99\") " Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.227894 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs" (OuterVolumeSpecName: "logs") pod "717ad5f1-331c-4e8c-93e3-04305b2c2a99" (UID: "717ad5f1-331c-4e8c-93e3-04305b2c2a99"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.228200 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/717ad5f1-331c-4e8c-93e3-04305b2c2a99-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.242005 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk" (OuterVolumeSpecName: "kube-api-access-gdxwk") pod "717ad5f1-331c-4e8c-93e3-04305b2c2a99" (UID: "717ad5f1-331c-4e8c-93e3-04305b2c2a99"). InnerVolumeSpecName "kube-api-access-gdxwk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.293606 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data" (OuterVolumeSpecName: "config-data") pod "717ad5f1-331c-4e8c-93e3-04305b2c2a99" (UID: "717ad5f1-331c-4e8c-93e3-04305b2c2a99"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.333225 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "717ad5f1-331c-4e8c-93e3-04305b2c2a99" (UID: "717ad5f1-331c-4e8c-93e3-04305b2c2a99"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.338503 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdxwk\" (UniqueName: \"kubernetes.io/projected/717ad5f1-331c-4e8c-93e3-04305b2c2a99-kube-api-access-gdxwk\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.339434 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.339642 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.371768 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "717ad5f1-331c-4e8c-93e3-04305b2c2a99" (UID: "717ad5f1-331c-4e8c-93e3-04305b2c2a99"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.443857 4664 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/717ad5f1-331c-4e8c-93e3-04305b2c2a99-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.845558 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"717ad5f1-331c-4e8c-93e3-04305b2c2a99","Type":"ContainerDied","Data":"04ec473bbd078497ef3c31673328c7a148c8f126e8997bf085925cf35836cfce"} Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.845610 4664 scope.go:117] "RemoveContainer" containerID="95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.845711 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.881630 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.889450 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.890080 4664 scope.go:117] "RemoveContainer" containerID="8d90f1e667aaed57651a58cbbc1974a395b4f94384c883dbed9e803051409a31" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.916469 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:42 crc kubenswrapper[4664]: E1013 07:06:42.916946 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="init" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.916975 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="init" Oct 13 07:06:42 crc kubenswrapper[4664]: E1013 07:06:42.916992 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b63677-e86e-4de3-8091-1c36dc8fab29" containerName="nova-manage" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.916999 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b63677-e86e-4de3-8091-1c36dc8fab29" containerName="nova-manage" Oct 13 07:06:42 crc kubenswrapper[4664]: E1013 07:06:42.917010 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-log" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917016 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-log" Oct 13 07:06:42 crc kubenswrapper[4664]: E1013 07:06:42.917052 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-metadata" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917060 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-metadata" Oct 13 07:06:42 crc kubenswrapper[4664]: E1013 07:06:42.917075 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="dnsmasq-dns" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917080 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="dnsmasq-dns" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917295 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-log" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917317 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" containerName="nova-metadata-metadata" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917327 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b63677-e86e-4de3-8091-1c36dc8fab29" containerName="nova-manage" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.917352 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="99998d93-3c3d-4269-96b4-260b38f59814" containerName="dnsmasq-dns" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.918516 4664 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.924732 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.925487 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.934009 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.954778 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.954840 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.954873 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.955038 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkbh4\" (UniqueName: \"kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:42 crc kubenswrapper[4664]: I1013 07:06:42.956644 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.058212 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.058687 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.058786 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " 
pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.058907 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkbh4\" (UniqueName: \"kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.058985 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.059043 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="717ad5f1-331c-4e8c-93e3-04305b2c2a99" path="/var/lib/kubelet/pods/717ad5f1-331c-4e8c-93e3-04305b2c2a99/volumes" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.059196 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.068700 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.071032 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.071392 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.075635 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkbh4\" (UniqueName: \"kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4\") pod \"nova-metadata-0\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " pod="openstack/nova-metadata-0" Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.258250 4664 util.go:30] "No sandbox for pod can be found. 
Oct 13 07:06:43 crc kubenswrapper[4664]: I1013 07:06:43.950731 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 13 07:06:44 crc kubenswrapper[4664]: I1013 07:06:44.872676 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerStarted","Data":"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0"}
Oct 13 07:06:44 crc kubenswrapper[4664]: I1013 07:06:44.872953 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerStarted","Data":"4615143b34c2db742aa2eb2568613898b13169140bc17fd60b22f6b16fd4ec83"}
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.932409 4664 generic.go:334] "Generic (PLEG): container finished" podID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerID="e746bdb8eb48e1b97bc8ed090f6da2cfd5c9753cb2edfa61ad086b02261dd37e" exitCode=0
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.932893 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerDied","Data":"e746bdb8eb48e1b97bc8ed090f6da2cfd5c9753cb2edfa61ad086b02261dd37e"}
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.943686 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerStarted","Data":"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8"}
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.946745 4664 generic.go:334] "Generic (PLEG): container finished" podID="f088ee30-e602-4c3d-b212-da3436e29c67" containerID="beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" exitCode=0
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.946892 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f088ee30-e602-4c3d-b212-da3436e29c67","Type":"ContainerDied","Data":"beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5"}
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.947209 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f088ee30-e602-4c3d-b212-da3436e29c67","Type":"ContainerDied","Data":"7a718f62398a2712f9af33167a6c087ab906ec9120f8dea82f9d937255c09b0e"}
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.947470 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a718f62398a2712f9af33167a6c087ab906ec9120f8dea82f9d937255c09b0e"
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.968468 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.9684519160000002 podStartE2EDuration="3.968451916s" podCreationTimestamp="2025-10-13 07:06:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:45.960559684 +0000 UTC m=+1213.648004886" watchObservedRunningTime="2025-10-13 07:06:45.968451916 +0000 UTC m=+1213.655897108"
Oct 13 07:06:45 crc kubenswrapper[4664]: I1013 07:06:45.987135 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.009390 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.032933 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle\") pod \"f088ee30-e602-4c3d-b212-da3436e29c67\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") "
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.033308 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-457fl\" (UniqueName: \"kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl\") pod \"f088ee30-e602-4c3d-b212-da3436e29c67\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") "
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.033425 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data\") pod \"f088ee30-e602-4c3d-b212-da3436e29c67\" (UID: \"f088ee30-e602-4c3d-b212-da3436e29c67\") "
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.041992 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl" (OuterVolumeSpecName: "kube-api-access-457fl") pod "f088ee30-e602-4c3d-b212-da3436e29c67" (UID: "f088ee30-e602-4c3d-b212-da3436e29c67"). InnerVolumeSpecName "kube-api-access-457fl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.079059 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data" (OuterVolumeSpecName: "config-data") pod "f088ee30-e602-4c3d-b212-da3436e29c67" (UID: "f088ee30-e602-4c3d-b212-da3436e29c67"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.103175 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f088ee30-e602-4c3d-b212-da3436e29c67" (UID: "f088ee30-e602-4c3d-b212-da3436e29c67"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.135334 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs\") pod \"f6f2ac68-c742-4ace-8ead-35c43f62c679\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.135533 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data\") pod \"f6f2ac68-c742-4ace-8ead-35c43f62c679\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.135632 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9wjl\" (UniqueName: \"kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl\") pod \"f6f2ac68-c742-4ace-8ead-35c43f62c679\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.135752 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle\") pod \"f6f2ac68-c742-4ace-8ead-35c43f62c679\" (UID: \"f6f2ac68-c742-4ace-8ead-35c43f62c679\") " Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.136375 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-457fl\" (UniqueName: \"kubernetes.io/projected/f088ee30-e602-4c3d-b212-da3436e29c67-kube-api-access-457fl\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.137265 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.137338 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f088ee30-e602-4c3d-b212-da3436e29c67-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.137700 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs" (OuterVolumeSpecName: "logs") pod "f6f2ac68-c742-4ace-8ead-35c43f62c679" (UID: "f6f2ac68-c742-4ace-8ead-35c43f62c679"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.141989 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl" (OuterVolumeSpecName: "kube-api-access-z9wjl") pod "f6f2ac68-c742-4ace-8ead-35c43f62c679" (UID: "f6f2ac68-c742-4ace-8ead-35c43f62c679"). InnerVolumeSpecName "kube-api-access-z9wjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.174220 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6f2ac68-c742-4ace-8ead-35c43f62c679" (UID: "f6f2ac68-c742-4ace-8ead-35c43f62c679"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.175752 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data" (OuterVolumeSpecName: "config-data") pod "f6f2ac68-c742-4ace-8ead-35c43f62c679" (UID: "f6f2ac68-c742-4ace-8ead-35c43f62c679"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.239650 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.239691 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6f2ac68-c742-4ace-8ead-35c43f62c679-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.239705 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6f2ac68-c742-4ace-8ead-35c43f62c679-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.239715 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9wjl\" (UniqueName: \"kubernetes.io/projected/f6f2ac68-c742-4ace-8ead-35c43f62c679-kube-api-access-z9wjl\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.957551 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.961141 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f6f2ac68-c742-4ace-8ead-35c43f62c679","Type":"ContainerDied","Data":"dafe26fe9e25c4c719348834e535351b9e98b5d5f7a114076124a7091e4c3def"} Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.961191 4664 scope.go:117] "RemoveContainer" containerID="e746bdb8eb48e1b97bc8ed090f6da2cfd5c9753cb2edfa61ad086b02261dd37e" Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.961525 4664 util.go:48] "No ready sandbox for pod can be found. 
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.985356 4664 scope.go:117] "RemoveContainer" containerID="6815bd9a0a01d33358499fe3caf5812c84146ba0b0b503742730fcd74f10a4b3"
Oct 13 07:06:46 crc kubenswrapper[4664]: I1013 07:06:46.992449 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.005714 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.025912 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.038548 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: E1013 07:06:47.038946 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" containerName="nova-scheduler-scheduler"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.038961 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" containerName="nova-scheduler-scheduler"
Oct 13 07:06:47 crc kubenswrapper[4664]: E1013 07:06:47.038975 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-log"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.038983 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-log"
Oct 13 07:06:47 crc kubenswrapper[4664]: E1013 07:06:47.039016 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-api"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.039022 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-api"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.039186 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-log"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.039196 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" containerName="nova-scheduler-scheduler"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.039209 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" containerName="nova-api-api"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.039859 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.042704 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.065618 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f088ee30-e602-4c3d-b212-da3436e29c67" path="/var/lib/kubelet/pods/f088ee30-e602-4c3d-b212-da3436e29c67/volumes"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.068267 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.088655 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.103047 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.105010 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.107064 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.128585 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.169530 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.169723 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x255\" (UniqueName: \"kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.169976 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.170032 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.170073 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.170106 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26rtj\" (UniqueName: \"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0"
\"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.170328 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.271786 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272054 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26rtj\" (UniqueName: \"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272257 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272368 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272509 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x255\" (UniqueName: \"kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272657 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272759 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.272866 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.277699 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.278337 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.290249 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.290816 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.292089 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26rtj\" (UniqueName: \"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") pod \"nova-scheduler-0\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.292356 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x255\" (UniqueName: \"kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255\") pod \"nova-api-0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " pod="openstack/nova-api-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.368611 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.426971 4664 util.go:30] "No sandbox for pod can be found. 
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.750015 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.881291 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused"
Oct 13 07:06:47 crc kubenswrapper[4664]: I1013 07:06:47.940071 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 13 07:06:48 crc kubenswrapper[4664]: I1013 07:06:48.005994 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bb8d5a94-642c-4dff-8ace-ebd7c844bb76","Type":"ContainerStarted","Data":"41d54a52b346121453c2954a0e69d7e5278680a774c0dc3cac3bc4266f5ca3c9"}
Oct 13 07:06:48 crc kubenswrapper[4664]: I1013 07:06:48.156635 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 13 07:06:48 crc kubenswrapper[4664]: I1013 07:06:48.259885 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Oct 13 07:06:48 crc kubenswrapper[4664]: I1013 07:06:48.259941 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.036296 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bb8d5a94-642c-4dff-8ace-ebd7c844bb76","Type":"ContainerStarted","Data":"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75"}
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.042841 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerStarted","Data":"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22"}
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.042886 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerStarted","Data":"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67"}
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.042899 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerStarted","Data":"413ced81dd5348072698d0bf486026a0fd1a680dc319756cd961d84ccf4e12d7"}
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.082711 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.082689348 podStartE2EDuration="3.082689348s" podCreationTimestamp="2025-10-13 07:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:49.07791851 +0000 UTC m=+1216.765363712" watchObservedRunningTime="2025-10-13 07:06:49.082689348 +0000 UTC m=+1216.770134540"
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.083680 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6f2ac68-c742-4ace-8ead-35c43f62c679" path="/var/lib/kubelet/pods/f6f2ac68-c742-4ace-8ead-35c43f62c679/volumes"
Oct 13 07:06:49 crc kubenswrapper[4664]: I1013 07:06:49.109755 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.109738917 podStartE2EDuration="2.109738917s" podCreationTimestamp="2025-10-13 07:06:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:49.099376268 +0000 UTC m=+1216.786821470" watchObservedRunningTime="2025-10-13 07:06:49.109738917 +0000 UTC m=+1216.797184109"
Oct 13 07:06:52 crc kubenswrapper[4664]: I1013 07:06:52.369223 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Oct 13 07:06:53 crc kubenswrapper[4664]: I1013 07:06:53.079740 4664 generic.go:334] "Generic (PLEG): container finished" podID="7b3061d9-9611-4c41-af2e-5c978dc8032c" containerID="528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6" exitCode=0
Oct 13 07:06:53 crc kubenswrapper[4664]: I1013 07:06:53.079829 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" event={"ID":"7b3061d9-9611-4c41-af2e-5c978dc8032c","Type":"ContainerDied","Data":"528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6"}
Oct 13 07:06:53 crc kubenswrapper[4664]: I1013 07:06:53.259568 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Oct 13 07:06:53 crc kubenswrapper[4664]: I1013 07:06:53.259613 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.275949 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.276278 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.496339 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-q9n4w"
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.636029 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle\") pod \"7b3061d9-9611-4c41-af2e-5c978dc8032c\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") "
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.636098 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f52l7\" (UniqueName: \"kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7\") pod \"7b3061d9-9611-4c41-af2e-5c978dc8032c\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") "
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.636206 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts\") pod \"7b3061d9-9611-4c41-af2e-5c978dc8032c\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") "
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.636375 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data\") pod \"7b3061d9-9611-4c41-af2e-5c978dc8032c\" (UID: \"7b3061d9-9611-4c41-af2e-5c978dc8032c\") "
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.643238 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts" (OuterVolumeSpecName: "scripts") pod "7b3061d9-9611-4c41-af2e-5c978dc8032c" (UID: "7b3061d9-9611-4c41-af2e-5c978dc8032c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.643281 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7" (OuterVolumeSpecName: "kube-api-access-f52l7") pod "7b3061d9-9611-4c41-af2e-5c978dc8032c" (UID: "7b3061d9-9611-4c41-af2e-5c978dc8032c"). InnerVolumeSpecName "kube-api-access-f52l7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.680521 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b3061d9-9611-4c41-af2e-5c978dc8032c" (UID: "7b3061d9-9611-4c41-af2e-5c978dc8032c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.682731 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data" (OuterVolumeSpecName: "config-data") pod "7b3061d9-9611-4c41-af2e-5c978dc8032c" (UID: "7b3061d9-9611-4c41-af2e-5c978dc8032c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.737950 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.737996 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f52l7\" (UniqueName: \"kubernetes.io/projected/7b3061d9-9611-4c41-af2e-5c978dc8032c-kube-api-access-f52l7\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.738010 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:54 crc kubenswrapper[4664]: I1013 07:06:54.738022 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b3061d9-9611-4c41-af2e-5c978dc8032c-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.097129 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" event={"ID":"7b3061d9-9611-4c41-af2e-5c978dc8032c","Type":"ContainerDied","Data":"c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0"} Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.097174 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.097175 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-q9n4w" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.196366 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 13 07:06:55 crc kubenswrapper[4664]: E1013 07:06:55.196739 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3061d9-9611-4c41-af2e-5c978dc8032c" containerName="nova-cell1-conductor-db-sync" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.196754 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3061d9-9611-4c41-af2e-5c978dc8032c" containerName="nova-cell1-conductor-db-sync" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.196985 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b3061d9-9611-4c41-af2e-5c978dc8032c" containerName="nova-cell1-conductor-db-sync" Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.197611 4664 util.go:30] "No sandbox for pod can be found. 
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.210693 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.258231 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.358816 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49nmv\" (UniqueName: \"kubernetes.io/projected/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-kube-api-access-49nmv\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.359170 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.359294 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.461323 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49nmv\" (UniqueName: \"kubernetes.io/projected/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-kube-api-access-49nmv\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.461390 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.461531 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.475080 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.478588 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.516360 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49nmv\" (UniqueName: \"kubernetes.io/projected/4fa7f807-537d-4f7f-8ef4-a0f61358ec92-kube-api-access-49nmv\") pod \"nova-cell1-conductor-0\" (UID: \"4fa7f807-537d-4f7f-8ef4-a0f61358ec92\") " pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.586203 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:55 crc kubenswrapper[4664]: I1013 07:06:55.996333 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 13 07:06:56 crc kubenswrapper[4664]: I1013 07:06:56.134780 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4fa7f807-537d-4f7f-8ef4-a0f61358ec92","Type":"ContainerStarted","Data":"770814acbfc3b707eeac3133d698c8ed203db55c5c48717c7c77fac89ea828cf"}
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.144288 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4fa7f807-537d-4f7f-8ef4-a0f61358ec92","Type":"ContainerStarted","Data":"67dce1ef9fc9ef19d0804d41522744c1a3ba69d0e113b5cd7458a0873f9ef058"}
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.144637 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.168243 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.16822484 podStartE2EDuration="2.16822484s" podCreationTimestamp="2025-10-13 07:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:06:57.163155634 +0000 UTC m=+1224.850600826" watchObservedRunningTime="2025-10-13 07:06:57.16822484 +0000 UTC m=+1224.855670032"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.377398 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.429226 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.429268 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.434980 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.739229 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused"
Oct 13 07:06:57 crc kubenswrapper[4664]: I1013 07:06:57.872057 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused"
Oct 13 07:06:58 crc kubenswrapper[4664]: I1013 07:06:58.192777 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
(probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 13 07:06:58 crc kubenswrapper[4664]: I1013 07:06:58.515017 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.209:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:06:58 crc kubenswrapper[4664]: I1013 07:06:58.515174 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.209:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:06:58 crc kubenswrapper[4664]: I1013 07:06:58.812302 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:06:58 crc kubenswrapper[4664]: I1013 07:06:58.812364 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:06:59 crc kubenswrapper[4664]: W1013 07:06:59.531945 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod717ad5f1_331c_4e8c_93e3_04305b2c2a99.slice/crio-95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4.scope WatchSource:0}: Error finding container 95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4: Status 404 returned error can't find the container with id 95bd9ae74a5a4e7ef0a6d034289c990566b16f1a7c19220ced682404571bc5b4 Oct 13 07:06:59 crc kubenswrapper[4664]: E1013 07:06:59.831296 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3061d9_9611_4c41_af2e_5c978dc8032c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3061d9_9611_4c41_af2e_5c978dc8032c.slice/crio-conmon-528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode74a1282_6d8d_4fdd_ab30_b4ba72ddc819.slice/crio-conmon-c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode74a1282_6d8d_4fdd_ab30_b4ba72ddc819.slice/crio-c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3061d9_9611_4c41_af2e_5c978dc8032c.slice/crio-528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3061d9_9611_4c41_af2e_5c978dc8032c.slice/crio-c9f92f45d87d5206cf12a5ca8a95e845dce31a21d60f0fbca7d476d3efb785c0\": RecentStats: unable to find data in memory cache]" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.085910 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.150494 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data\") pod \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.150630 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj4mr\" (UniqueName: \"kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr\") pod \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.150770 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle\") pod \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\" (UID: \"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819\") " Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.171028 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr" (OuterVolumeSpecName: "kube-api-access-qj4mr") pod "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" (UID: "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819"). InnerVolumeSpecName "kube-api-access-qj4mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.190851 4664 generic.go:334] "Generic (PLEG): container finished" podID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" containerID="c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca" exitCode=137 Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.190902 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819","Type":"ContainerDied","Data":"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca"} Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.190927 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e74a1282-6d8d-4fdd-ab30-b4ba72ddc819","Type":"ContainerDied","Data":"0dca3a32c76e3e4e9d87f615ed196f6647dc214730244e3c1d3644d608f7a98c"} Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.190932 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.190943 4664 scope.go:117] "RemoveContainer" containerID="c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.196290 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data" (OuterVolumeSpecName: "config-data") pod "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" (UID: "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819"). 
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.222323 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.256489 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-config-data\") on node \"crc\" DevicePath \"\""
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.256842 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj4mr\" (UniqueName: \"kubernetes.io/projected/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-kube-api-access-qj4mr\") on node \"crc\" DevicePath \"\""
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.299651 4664 scope.go:117] "RemoveContainer" containerID="c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca"
Oct 13 07:07:00 crc kubenswrapper[4664]: E1013 07:07:00.300221 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca\": container with ID starting with c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca not found: ID does not exist" containerID="c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca"
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.300267 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca"} err="failed to get container status \"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca\": rpc error: code = NotFound desc = could not find container \"c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca\": container with ID starting with c4e250cbdfc766f5fd120cfd51155e4b2a08a199a90bf7169704f84a133f7fca not found: ID does not exist"
Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.326139 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" (UID: "e74a1282-6d8d-4fdd-ab30-b4ba72ddc819"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.359553 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.566866 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.582545 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.611729 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:07:00 crc kubenswrapper[4664]: E1013 07:07:00.622401 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" containerName="nova-cell1-novncproxy-novncproxy" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.622437 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" containerName="nova-cell1-novncproxy-novncproxy" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.626327 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" containerName="nova-cell1-novncproxy-novncproxy" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.627410 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.633739 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.634622 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.681538 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.706959 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.709533 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x52bb\" (UniqueName: \"kubernetes.io/projected/0aeab06b-f1e7-4479-90f9-726b28672eaa-kube-api-access-x52bb\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.709719 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.709859 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " 
pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.709997 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.710078 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.811750 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.811816 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.811874 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.811890 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.811965 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x52bb\" (UniqueName: \"kubernetes.io/projected/0aeab06b-f1e7-4479-90f9-726b28672eaa-kube-api-access-x52bb\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.817845 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.818427 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 
07:07:00.820720 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.820759 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aeab06b-f1e7-4479-90f9-726b28672eaa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.833879 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x52bb\" (UniqueName: \"kubernetes.io/projected/0aeab06b-f1e7-4479-90f9-726b28672eaa-kube-api-access-x52bb\") pod \"nova-cell1-novncproxy-0\" (UID: \"0aeab06b-f1e7-4479-90f9-726b28672eaa\") " pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:00 crc kubenswrapper[4664]: I1013 07:07:00.973806 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:01 crc kubenswrapper[4664]: I1013 07:07:01.061456 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e74a1282-6d8d-4fdd-ab30-b4ba72ddc819" path="/var/lib/kubelet/pods/e74a1282-6d8d-4fdd-ab30-b4ba72ddc819/volumes" Oct 13 07:07:01 crc kubenswrapper[4664]: I1013 07:07:01.589679 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 13 07:07:02 crc kubenswrapper[4664]: I1013 07:07:02.232704 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"0aeab06b-f1e7-4479-90f9-726b28672eaa","Type":"ContainerStarted","Data":"ac2929915ea6604d72a89c3bbd104104cfce7ae93f406dabdf7e9ad16ce05f66"} Oct 13 07:07:02 crc kubenswrapper[4664]: I1013 07:07:02.233325 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"0aeab06b-f1e7-4479-90f9-726b28672eaa","Type":"ContainerStarted","Data":"1cbec241c76fdcaa294a845e8b3fc219bfe1972dc36982d0aaccac29873737b7"} Oct 13 07:07:02 crc kubenswrapper[4664]: I1013 07:07:02.264454 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.26443077 podStartE2EDuration="2.26443077s" podCreationTimestamp="2025-10-13 07:07:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:02.258761188 +0000 UTC m=+1229.946206400" watchObservedRunningTime="2025-10-13 07:07:02.26443077 +0000 UTC m=+1229.951875962" Oct 13 07:07:03 crc kubenswrapper[4664]: I1013 07:07:03.281341 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 13 07:07:03 crc kubenswrapper[4664]: I1013 07:07:03.283305 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 13 07:07:03 crc kubenswrapper[4664]: I1013 07:07:03.302663 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 13 07:07:04 crc kubenswrapper[4664]: I1013 07:07:04.506445 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 
13 07:07:05 crc kubenswrapper[4664]: I1013 07:07:05.623047 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Oct 13 07:07:05 crc kubenswrapper[4664]: I1013 07:07:05.772709 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:05 crc kubenswrapper[4664]: I1013 07:07:05.772962 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="21624285-c826-40e3-8963-a2ac3cf7efd8" containerName="kube-state-metrics" containerID="cri-o://8cbc1f6b16fdf110ead5e896ff6d02f58da39021ae4f918cb9c29f39225ea7dd" gracePeriod=30 Oct 13 07:07:05 crc kubenswrapper[4664]: I1013 07:07:05.974166 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.309307 4664 generic.go:334] "Generic (PLEG): container finished" podID="21624285-c826-40e3-8963-a2ac3cf7efd8" containerID="8cbc1f6b16fdf110ead5e896ff6d02f58da39021ae4f918cb9c29f39225ea7dd" exitCode=2 Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.309741 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21624285-c826-40e3-8963-a2ac3cf7efd8","Type":"ContainerDied","Data":"8cbc1f6b16fdf110ead5e896ff6d02f58da39021ae4f918cb9c29f39225ea7dd"} Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.383616 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.424376 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztwcm\" (UniqueName: \"kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm\") pod \"21624285-c826-40e3-8963-a2ac3cf7efd8\" (UID: \"21624285-c826-40e3-8963-a2ac3cf7efd8\") " Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.457331 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm" (OuterVolumeSpecName: "kube-api-access-ztwcm") pod "21624285-c826-40e3-8963-a2ac3cf7efd8" (UID: "21624285-c826-40e3-8963-a2ac3cf7efd8"). InnerVolumeSpecName "kube-api-access-ztwcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:06 crc kubenswrapper[4664]: I1013 07:07:06.526586 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztwcm\" (UniqueName: \"kubernetes.io/projected/21624285-c826-40e3-8963-a2ac3cf7efd8-kube-api-access-ztwcm\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.327353 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21624285-c826-40e3-8963-a2ac3cf7efd8","Type":"ContainerDied","Data":"7932489b6a6772bc6d5e24a9626949ae4fb10def5a473f1f023fe8e2ec1116db"} Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.327401 4664 scope.go:117] "RemoveContainer" containerID="8cbc1f6b16fdf110ead5e896ff6d02f58da39021ae4f918cb9c29f39225ea7dd" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.328415 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.354544 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.380528 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.423706 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:07 crc kubenswrapper[4664]: E1013 07:07:07.424199 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21624285-c826-40e3-8963-a2ac3cf7efd8" containerName="kube-state-metrics" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.424225 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="21624285-c826-40e3-8963-a2ac3cf7efd8" containerName="kube-state-metrics" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.424622 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="21624285-c826-40e3-8963-a2ac3cf7efd8" containerName="kube-state-metrics" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.425542 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.429493 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.429667 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.467249 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.549607 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.549930 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6msdq\" (UniqueName: \"kubernetes.io/projected/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-api-access-6msdq\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.550075 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.550228 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.651920 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.652045 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.652092 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6msdq\" (UniqueName: \"kubernetes.io/projected/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-api-access-6msdq\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.652162 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.660727 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.662613 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.667207 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.691956 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6msdq\" (UniqueName: \"kubernetes.io/projected/43ff3dc8-8a2c-4051-813b-69406ed7359e-kube-api-access-6msdq\") pod \"kube-state-metrics-0\" (UID: \"43ff3dc8-8a2c-4051-813b-69406ed7359e\") " pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.739218 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.739301 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.740122 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6"} pod="openstack/horizon-8487d6c5d4-cgnm9" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.740157 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" containerID="cri-o://c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6" gracePeriod=30 Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.745338 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.802400 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.803159 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.852387 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.872834 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.872926 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.873555 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.888740 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"716b360bd426e69bd996488b830583fb95239ac8059eb566e7ac7b80470cc331"} pod="openstack/horizon-7d78c558d-rjg4v" containerMessage="Container horizon failed startup probe, will be restarted" Oct 13 07:07:07 crc kubenswrapper[4664]: I1013 07:07:07.888817 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" containerID="cri-o://716b360bd426e69bd996488b830583fb95239ac8059eb566e7ac7b80470cc331" gracePeriod=30 Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.339897 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.346956 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.515434 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.516995 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.528463 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575455 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575497 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575528 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575573 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575628 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66248\" (UniqueName: \"kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.575648 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.676953 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66248\" (UniqueName: \"kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.677008 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.677086 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.677124 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.677159 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.677214 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.678307 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.679192 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.679766 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.680324 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.680981 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.684403 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.684652 4664 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-central-agent" containerID="cri-o://c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a" gracePeriod=30 Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.685053 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="proxy-httpd" containerID="cri-o://87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2" gracePeriod=30 Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.685100 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="sg-core" containerID="cri-o://5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce" gracePeriod=30 Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.685132 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-notification-agent" containerID="cri-o://05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939" gracePeriod=30 Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.704621 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66248\" (UniqueName: \"kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248\") pod \"dnsmasq-dns-b474f45dc-wqs97\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:08 crc kubenswrapper[4664]: I1013 07:07:08.849479 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.072146 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21624285-c826-40e3-8963-a2ac3cf7efd8" path="/var/lib/kubelet/pods/21624285-c826-40e3-8963-a2ac3cf7efd8/volumes" Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.348290 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.392414 4664 generic.go:334] "Generic (PLEG): container finished" podID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerID="87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2" exitCode=0 Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.392447 4664 generic.go:334] "Generic (PLEG): container finished" podID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerID="5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce" exitCode=2 Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.393194 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerDied","Data":"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2"} Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.393250 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerDied","Data":"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce"} Oct 13 07:07:09 crc kubenswrapper[4664]: W1013 07:07:09.559551 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3bc8906d_8312_44af_9ff5_eeac77de1533.slice/crio-4d899e8add38a4603d0e0b155bbfa07f02c82d634e27057047d9affb650be909 WatchSource:0}: Error finding container 4d899e8add38a4603d0e0b155bbfa07f02c82d634e27057047d9affb650be909: Status 404 returned error can't find the container with id 4d899e8add38a4603d0e0b155bbfa07f02c82d634e27057047d9affb650be909 Oct 13 07:07:09 crc kubenswrapper[4664]: I1013 07:07:09.560317 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.402761 4664 generic.go:334] "Generic (PLEG): container finished" podID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerID="1d5e386958a20c575f8ba47ededb64716b2b2e274c4aae690e5c0b203fb1e1c4" exitCode=0 Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.402839 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" event={"ID":"3bc8906d-8312-44af-9ff5-eeac77de1533","Type":"ContainerDied","Data":"1d5e386958a20c575f8ba47ededb64716b2b2e274c4aae690e5c0b203fb1e1c4"} Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.403272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" event={"ID":"3bc8906d-8312-44af-9ff5-eeac77de1533","Type":"ContainerStarted","Data":"4d899e8add38a4603d0e0b155bbfa07f02c82d634e27057047d9affb650be909"} Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.408694 4664 generic.go:334] "Generic (PLEG): container finished" podID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerID="c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a" exitCode=0 Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.408776 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerDied","Data":"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a"} Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.412512 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43ff3dc8-8a2c-4051-813b-69406ed7359e","Type":"ContainerStarted","Data":"2c7ba22774e9cf0031b3de871d1ae6c4f5e2b58ba81ba4e4ee9b25455ab492de"} Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.412556 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43ff3dc8-8a2c-4051-813b-69406ed7359e","Type":"ContainerStarted","Data":"aad5923c5c1df5c3317b32ddba61a740852eabc794e9e7d8b0cdada955c216a3"} Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.412622 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.490864 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.8962185639999998 podStartE2EDuration="3.490840655s" podCreationTimestamp="2025-10-13 07:07:07 +0000 UTC" firstStartedPulling="2025-10-13 07:07:09.363375715 +0000 UTC m=+1237.050820907" lastFinishedPulling="2025-10-13 07:07:09.957997806 +0000 UTC m=+1237.645442998" observedRunningTime="2025-10-13 07:07:10.46591823 +0000 UTC m=+1238.153363442" watchObservedRunningTime="2025-10-13 07:07:10.490840655 +0000 UTC m=+1238.178285847" Oct 13 07:07:10 crc kubenswrapper[4664]: I1013 07:07:10.973979 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.039290 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.421646 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" event={"ID":"3bc8906d-8312-44af-9ff5-eeac77de1533","Type":"ContainerStarted","Data":"a1fd38c1fa7771421a3d3dd7cbb37829c0170768844388f4674115d543e52eeb"} Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.422160 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.445736 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.446127 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" podStartSLOduration=3.446103644 podStartE2EDuration="3.446103644s" podCreationTimestamp="2025-10-13 07:07:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:11.438612993 +0000 UTC m=+1239.126058205" watchObservedRunningTime="2025-10-13 07:07:11.446103644 +0000 UTC m=+1239.133548846" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.640926 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-9qtdr"] Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.646264 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.649964 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.650177 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.665614 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-9qtdr"] Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.745508 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.745555 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.745760 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7brw\" (UniqueName: \"kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.745822 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.777174 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.777646 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-log" containerID="cri-o://8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67" gracePeriod=30 Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.777708 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-api" containerID="cri-o://2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22" gracePeriod=30 Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.847214 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7brw\" (UniqueName: \"kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.847265 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.847335 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.847361 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.852321 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.856278 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.866810 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.869252 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7brw\" (UniqueName: \"kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw\") pod \"nova-cell1-cell-mapping-9qtdr\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:11 crc kubenswrapper[4664]: I1013 07:07:11.968896 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:12 crc kubenswrapper[4664]: I1013 07:07:12.432970 4664 generic.go:334] "Generic (PLEG): container finished" podID="6765f17a-8f68-4051-9311-52a89c5632d0" containerID="8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67" exitCode=143 Oct 13 07:07:12 crc kubenswrapper[4664]: I1013 07:07:12.433046 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerDied","Data":"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67"} Oct 13 07:07:12 crc kubenswrapper[4664]: I1013 07:07:12.542034 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-9qtdr"] Oct 13 07:07:12 crc kubenswrapper[4664]: W1013 07:07:12.543569 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98cb4b7a_e3b0_496a_8b4c_71c78b879e44.slice/crio-7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f WatchSource:0}: Error finding container 7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f: Status 404 returned error can't find the container with id 7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f Oct 13 07:07:13 crc kubenswrapper[4664]: I1013 07:07:13.447520 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9qtdr" event={"ID":"98cb4b7a-e3b0-496a-8b4c-71c78b879e44","Type":"ContainerStarted","Data":"5bed13129d9f588d4806076dd61b7882f0673a7c3684bacdf19306bd9e51287e"} Oct 13 07:07:13 crc kubenswrapper[4664]: I1013 07:07:13.447856 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9qtdr" event={"ID":"98cb4b7a-e3b0-496a-8b4c-71c78b879e44","Type":"ContainerStarted","Data":"7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f"} Oct 13 07:07:13 crc kubenswrapper[4664]: I1013 07:07:13.469984 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-9qtdr" podStartSLOduration=2.46996254 podStartE2EDuration="2.46996254s" podCreationTimestamp="2025-10-13 07:07:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:13.465997473 +0000 UTC m=+1241.153442685" watchObservedRunningTime="2025-10-13 07:07:13.46996254 +0000 UTC m=+1241.157407742" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.265360 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.331993 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332113 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332135 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332259 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332303 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9vbn\" (UniqueName: \"kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332333 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.332390 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd\") pod \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\" (UID: \"60eab1c9-7ecd-47df-8f08-b6c11e11b18f\") " Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.333164 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.334211 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.340678 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn" (OuterVolumeSpecName: "kube-api-access-q9vbn") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "kube-api-access-q9vbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.346300 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts" (OuterVolumeSpecName: "scripts") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.388349 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.434821 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.435048 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9vbn\" (UniqueName: \"kubernetes.io/projected/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-kube-api-access-q9vbn\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.435185 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.435274 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.435335 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.460887 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.468425 4664 generic.go:334] "Generic (PLEG): container finished" podID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerID="05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939" exitCode=0 Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.472916 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.474289 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerDied","Data":"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939"} Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.474391 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60eab1c9-7ecd-47df-8f08-b6c11e11b18f","Type":"ContainerDied","Data":"61a0b931c22097f8d9c49c836c560971e82a9641b9dda1871802f3a1cf55298b"} Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.474470 4664 scope.go:117] "RemoveContainer" containerID="87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.507004 4664 scope.go:117] "RemoveContainer" containerID="5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.519843 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data" (OuterVolumeSpecName: "config-data") pod "60eab1c9-7ecd-47df-8f08-b6c11e11b18f" (UID: "60eab1c9-7ecd-47df-8f08-b6c11e11b18f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.524293 4664 scope.go:117] "RemoveContainer" containerID="05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.538523 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.538557 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60eab1c9-7ecd-47df-8f08-b6c11e11b18f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.541989 4664 scope.go:117] "RemoveContainer" containerID="c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.560719 4664 scope.go:117] "RemoveContainer" containerID="87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.561061 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2\": container with ID starting with 87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2 not found: ID does not exist" containerID="87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561109 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2"} err="failed to get container status \"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2\": rpc error: code = NotFound desc = could not find container \"87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2\": container with ID starting with 87a60c26ed4487ba5e12d74c7569e08e2e73054951b32c94668f26293af100f2 not found: ID does not 
exist" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561137 4664 scope.go:117] "RemoveContainer" containerID="5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.561444 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce\": container with ID starting with 5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce not found: ID does not exist" containerID="5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561463 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce"} err="failed to get container status \"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce\": rpc error: code = NotFound desc = could not find container \"5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce\": container with ID starting with 5f3e95a2d5ef1e02f97c26c06179a30cfe27bd0dbb96316cc96da83b897c91ce not found: ID does not exist" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561475 4664 scope.go:117] "RemoveContainer" containerID="05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.561776 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939\": container with ID starting with 05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939 not found: ID does not exist" containerID="05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561815 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939"} err="failed to get container status \"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939\": rpc error: code = NotFound desc = could not find container \"05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939\": container with ID starting with 05dc27c1b682c5ddc1cc5f5be8f208b456c26e59fa81249acf1e74da74b3e939 not found: ID does not exist" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.561828 4664 scope.go:117] "RemoveContainer" containerID="c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.562166 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a\": container with ID starting with c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a not found: ID does not exist" containerID="c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.562197 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a"} err="failed to get container status \"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a\": rpc error: code = NotFound desc = could not find container 
\"c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a\": container with ID starting with c501c08e5ed39cc17cd12fb7d0af2cb6283a203d5dc9e8c82e2f5dad912d670a not found: ID does not exist" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.807615 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.815725 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827296 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.827673 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-central-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827689 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-central-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.827712 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-notification-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827718 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-notification-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.827739 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="sg-core" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827746 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="sg-core" Oct 13 07:07:14 crc kubenswrapper[4664]: E1013 07:07:14.827760 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="proxy-httpd" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827766 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="proxy-httpd" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827944 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-notification-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827956 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="sg-core" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827973 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="ceilometer-central-agent" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.827987 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" containerName="proxy-httpd" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.829639 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.836709 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.836878 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.839191 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.842512 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947039 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947089 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947135 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947191 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947230 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947254 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdssm\" (UniqueName: \"kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947281 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:14 crc kubenswrapper[4664]: I1013 07:07:14.947296 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.027483 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:15 crc kubenswrapper[4664]: E1013 07:07:15.028353 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-tdssm log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="994ad42e-896a-4f59-b3f0-1e81f062807e" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.048918 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.048965 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049010 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049067 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049116 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049141 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdssm\" (UniqueName: \"kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049167 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.049184 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " 
pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.050328 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.050418 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.053965 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.054056 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.054173 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.056399 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.056935 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.064879 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60eab1c9-7ecd-47df-8f08-b6c11e11b18f" path="/var/lib/kubelet/pods/60eab1c9-7ecd-47df-8f08-b6c11e11b18f/volumes" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.068120 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdssm\" (UniqueName: \"kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm\") pod \"ceilometer-0\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.483471 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.487548 4664 generic.go:334] "Generic (PLEG): container finished" podID="6765f17a-8f68-4051-9311-52a89c5632d0" containerID="2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22" exitCode=0 Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.487647 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.487759 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerDied","Data":"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22"} Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.487849 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6765f17a-8f68-4051-9311-52a89c5632d0","Type":"ContainerDied","Data":"413ced81dd5348072698d0bf486026a0fd1a680dc319756cd961d84ccf4e12d7"} Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.487881 4664 scope.go:117] "RemoveContainer" containerID="2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.505049 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.529105 4664 scope.go:117] "RemoveContainer" containerID="8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.563481 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs\") pod \"6765f17a-8f68-4051-9311-52a89c5632d0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565317 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565420 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565458 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6x255\" (UniqueName: \"kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255\") pod \"6765f17a-8f68-4051-9311-52a89c5632d0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565480 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565541 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565581 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdssm\" (UniqueName: \"kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565610 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data\") pod \"6765f17a-8f68-4051-9311-52a89c5632d0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565616 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs" (OuterVolumeSpecName: "logs") pod "6765f17a-8f68-4051-9311-52a89c5632d0" (UID: "6765f17a-8f68-4051-9311-52a89c5632d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565644 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565663 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle\") pod \"6765f17a-8f68-4051-9311-52a89c5632d0\" (UID: \"6765f17a-8f68-4051-9311-52a89c5632d0\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565706 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.565726 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data\") pod \"994ad42e-896a-4f59-b3f0-1e81f062807e\" (UID: \"994ad42e-896a-4f59-b3f0-1e81f062807e\") " Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.566241 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6765f17a-8f68-4051-9311-52a89c5632d0-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.575755 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.576726 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.577295 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.581587 4664 scope.go:117] "RemoveContainer" containerID="2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.581739 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts" (OuterVolumeSpecName: "scripts") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: E1013 07:07:15.584045 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22\": container with ID starting with 2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22 not found: ID does not exist" containerID="2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.584084 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22"} err="failed to get container status \"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22\": rpc error: code = NotFound desc = could not find container \"2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22\": container with ID starting with 2c34775f425e1bf776df013331371a26b01a868d0412de08734e7d0f754efb22 not found: ID does not exist" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.584108 4664 scope.go:117] "RemoveContainer" containerID="8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.586176 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm" (OuterVolumeSpecName: "kube-api-access-tdssm") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "kube-api-access-tdssm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: E1013 07:07:15.586312 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67\": container with ID starting with 8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67 not found: ID does not exist" containerID="8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.586339 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67"} err="failed to get container status \"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67\": rpc error: code = NotFound desc = could not find container \"8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67\": container with ID starting with 8d4937792ce883ef85c34746d2d5e20bdc6fd5796f54b759ceb3c6044c4d3a67 not found: ID does not exist" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.587654 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255" (OuterVolumeSpecName: "kube-api-access-6x255") pod "6765f17a-8f68-4051-9311-52a89c5632d0" (UID: "6765f17a-8f68-4051-9311-52a89c5632d0"). InnerVolumeSpecName "kube-api-access-6x255". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.587945 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.590783 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.603525 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data" (OuterVolumeSpecName: "config-data") pod "994ad42e-896a-4f59-b3f0-1e81f062807e" (UID: "994ad42e-896a-4f59-b3f0-1e81f062807e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.632107 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data" (OuterVolumeSpecName: "config-data") pod "6765f17a-8f68-4051-9311-52a89c5632d0" (UID: "6765f17a-8f68-4051-9311-52a89c5632d0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.649842 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6765f17a-8f68-4051-9311-52a89c5632d0" (UID: "6765f17a-8f68-4051-9311-52a89c5632d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668148 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668180 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668190 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6765f17a-8f68-4051-9311-52a89c5632d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668201 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668209 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668217 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668226 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668234 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6x255\" (UniqueName: \"kubernetes.io/projected/6765f17a-8f68-4051-9311-52a89c5632d0-kube-api-access-6x255\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668242 4664 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/994ad42e-896a-4f59-b3f0-1e81f062807e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668251 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/994ad42e-896a-4f59-b3f0-1e81f062807e-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:15 crc kubenswrapper[4664]: I1013 07:07:15.668258 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdssm\" (UniqueName: \"kubernetes.io/projected/994ad42e-896a-4f59-b3f0-1e81f062807e-kube-api-access-tdssm\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.497317 4664 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.497341 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.559916 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.572118 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.580422 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.580455 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.662221 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: E1013 07:07:16.662739 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-log" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.662764 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-log" Oct 13 07:07:16 crc kubenswrapper[4664]: E1013 07:07:16.662823 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-api" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.662832 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-api" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.663052 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-log" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.663093 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" containerName="nova-api-api" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.667402 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.672280 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.672366 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.678589 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.695099 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.696687 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.699878 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.700034 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.700154 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711044 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711091 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711123 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711174 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llwzr\" (UniqueName: \"kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711203 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711230 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711285 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.711302 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 
07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.714935 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.732673 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813027 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813094 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813131 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813165 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813186 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813209 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grlnf\" (UniqueName: \"kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813250 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llwzr\" (UniqueName: \"kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813281 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813310 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " 
pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813327 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813376 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813404 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813420 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.813440 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.814259 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.814942 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.817698 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.818514 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.818880 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 
crc kubenswrapper[4664]: I1013 07:07:16.822256 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.832051 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llwzr\" (UniqueName: \"kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.835840 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data\") pod \"ceilometer-0\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " pod="openstack/ceilometer-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.915177 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.915429 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.915531 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grlnf\" (UniqueName: \"kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.915712 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.915881 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.916005 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.916126 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.918566 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.919372 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.919403 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.919919 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.932557 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grlnf\" (UniqueName: \"kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf\") pod \"nova-api-0\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " pod="openstack/nova-api-0" Oct 13 07:07:16 crc kubenswrapper[4664]: I1013 07:07:16.995616 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.034460 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.060088 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6765f17a-8f68-4051-9311-52a89c5632d0" path="/var/lib/kubelet/pods/6765f17a-8f68-4051-9311-52a89c5632d0/volumes" Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.060681 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="994ad42e-896a-4f59-b3f0-1e81f062807e" path="/var/lib/kubelet/pods/994ad42e-896a-4f59-b3f0-1e81f062807e/volumes" Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.544458 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.714222 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:17 crc kubenswrapper[4664]: W1013 07:07:17.733060 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a0adbf1_c9ff_4373_8531_adc3bd33102b.slice/crio-47d162b7a9751cf9d24e13ca13515517ba27bac5bb1a8ae09bf53eba5b23c52d WatchSource:0}: Error finding container 47d162b7a9751cf9d24e13ca13515517ba27bac5bb1a8ae09bf53eba5b23c52d: Status 404 returned error can't find the container with id 47d162b7a9751cf9d24e13ca13515517ba27bac5bb1a8ae09bf53eba5b23c52d Oct 13 07:07:17 crc kubenswrapper[4664]: I1013 07:07:17.777628 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.516957 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.517323 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.517342 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"61b009cfb7cfa102aba26aae41c8eaa90937195b3ee76f9261b671d359980850"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.519705 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerStarted","Data":"96e5783a66f4ad97c7bf1aa2f552f1c19a60610d7878121bc7bb454cf5ff7fdb"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.519760 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerStarted","Data":"219657d35265ae27d0314817dbadaea001c1e679cdb8838d0b5dbc9006bc9ff0"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.519774 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerStarted","Data":"47d162b7a9751cf9d24e13ca13515517ba27bac5bb1a8ae09bf53eba5b23c52d"} Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.552145 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.552119096 
podStartE2EDuration="2.552119096s" podCreationTimestamp="2025-10-13 07:07:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:18.54257123 +0000 UTC m=+1246.230016432" watchObservedRunningTime="2025-10-13 07:07:18.552119096 +0000 UTC m=+1246.239564288" Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.851960 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.982241 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:07:18 crc kubenswrapper[4664]: I1013 07:07:18.982556 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="dnsmasq-dns" containerID="cri-o://86fca4589d305b331ec63e8d7d414f93c8a8bcfff146c29aa2cf5a69eb3be901" gracePeriod=10 Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.564226 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd"} Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.570267 4664 generic.go:334] "Generic (PLEG): container finished" podID="98cb4b7a-e3b0-496a-8b4c-71c78b879e44" containerID="5bed13129d9f588d4806076dd61b7882f0673a7c3684bacdf19306bd9e51287e" exitCode=0 Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.570385 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9qtdr" event={"ID":"98cb4b7a-e3b0-496a-8b4c-71c78b879e44","Type":"ContainerDied","Data":"5bed13129d9f588d4806076dd61b7882f0673a7c3684bacdf19306bd9e51287e"} Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.576665 4664 generic.go:334] "Generic (PLEG): container finished" podID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerID="86fca4589d305b331ec63e8d7d414f93c8a8bcfff146c29aa2cf5a69eb3be901" exitCode=0 Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.576738 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" event={"ID":"9cb0e2b8-6d81-49b4-850e-8aa72b02c626","Type":"ContainerDied","Data":"86fca4589d305b331ec63e8d7d414f93c8a8bcfff146c29aa2cf5a69eb3be901"} Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.576784 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" event={"ID":"9cb0e2b8-6d81-49b4-850e-8aa72b02c626","Type":"ContainerDied","Data":"d997cdb1f495af536868f698a4be448a1ab9dd853235ad712d354b5c487c62eb"} Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.576814 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d997cdb1f495af536868f698a4be448a1ab9dd853235ad712d354b5c487c62eb" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.590843 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673439 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr4pc\" (UniqueName: \"kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673545 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673595 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673679 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673736 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.673756 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config\") pod \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\" (UID: \"9cb0e2b8-6d81-49b4-850e-8aa72b02c626\") " Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.705527 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc" (OuterVolumeSpecName: "kube-api-access-wr4pc") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "kube-api-access-wr4pc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.757115 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.776063 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr4pc\" (UniqueName: \"kubernetes.io/projected/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-kube-api-access-wr4pc\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.776089 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.781307 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.790690 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.801217 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.820756 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config" (OuterVolumeSpecName: "config") pod "9cb0e2b8-6d81-49b4-850e-8aa72b02c626" (UID: "9cb0e2b8-6d81-49b4-850e-8aa72b02c626"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.877814 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.877855 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.877865 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:19 crc kubenswrapper[4664]: I1013 07:07:19.877873 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cb0e2b8-6d81-49b4-850e-8aa72b02c626-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:20 crc kubenswrapper[4664]: I1013 07:07:20.585262 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69fd679865-wq8lz" Oct 13 07:07:20 crc kubenswrapper[4664]: I1013 07:07:20.639100 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:07:20 crc kubenswrapper[4664]: I1013 07:07:20.646988 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69fd679865-wq8lz"] Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.011209 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.061460 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" path="/var/lib/kubelet/pods/9cb0e2b8-6d81-49b4-850e-8aa72b02c626/volumes" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.101610 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts\") pod \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.101694 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle\") pod \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.101772 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7brw\" (UniqueName: \"kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw\") pod \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.101787 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data\") pod \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\" (UID: \"98cb4b7a-e3b0-496a-8b4c-71c78b879e44\") " Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.110324 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw" (OuterVolumeSpecName: "kube-api-access-g7brw") pod "98cb4b7a-e3b0-496a-8b4c-71c78b879e44" (UID: "98cb4b7a-e3b0-496a-8b4c-71c78b879e44"). InnerVolumeSpecName "kube-api-access-g7brw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.110414 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts" (OuterVolumeSpecName: "scripts") pod "98cb4b7a-e3b0-496a-8b4c-71c78b879e44" (UID: "98cb4b7a-e3b0-496a-8b4c-71c78b879e44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.138824 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98cb4b7a-e3b0-496a-8b4c-71c78b879e44" (UID: "98cb4b7a-e3b0-496a-8b4c-71c78b879e44"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.146351 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data" (OuterVolumeSpecName: "config-data") pod "98cb4b7a-e3b0-496a-8b4c-71c78b879e44" (UID: "98cb4b7a-e3b0-496a-8b4c-71c78b879e44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.203872 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.203924 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.203937 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7brw\" (UniqueName: \"kubernetes.io/projected/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-kube-api-access-g7brw\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.203949 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98cb4b7a-e3b0-496a-8b4c-71c78b879e44-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.596012 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-9qtdr" event={"ID":"98cb4b7a-e3b0-496a-8b4c-71c78b879e44","Type":"ContainerDied","Data":"7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f"} Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.596048 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7413a1b90fa947bb43fb3f5ef9459ec6f3a6ecd0b85883546e225b01c355ce8f" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.596101 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-9qtdr" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.616413 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729"} Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.617482 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.643047 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.522773241 podStartE2EDuration="5.643029897s" podCreationTimestamp="2025-10-13 07:07:16 +0000 UTC" firstStartedPulling="2025-10-13 07:07:17.553291003 +0000 UTC m=+1245.240736195" lastFinishedPulling="2025-10-13 07:07:20.673547659 +0000 UTC m=+1248.360992851" observedRunningTime="2025-10-13 07:07:21.633094841 +0000 UTC m=+1249.320540044" watchObservedRunningTime="2025-10-13 07:07:21.643029897 +0000 UTC m=+1249.330475089" Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.778419 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.778636 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-log" containerID="cri-o://219657d35265ae27d0314817dbadaea001c1e679cdb8838d0b5dbc9006bc9ff0" gracePeriod=30 Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.778696 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-api" containerID="cri-o://96e5783a66f4ad97c7bf1aa2f552f1c19a60610d7878121bc7bb454cf5ff7fdb" gracePeriod=30 Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.811890 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.812299 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" containerID="cri-o://b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" gracePeriod=30 Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.875776 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.876369 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" containerID="cri-o://83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0" gracePeriod=30 Oct 13 07:07:21 crc kubenswrapper[4664]: I1013 07:07:21.876783 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" containerID="cri-o://a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8" gracePeriod=30 Oct 13 07:07:22 crc kubenswrapper[4664]: E1013 07:07:22.379100 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:22 crc kubenswrapper[4664]: E1013 07:07:22.380472 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:22 crc kubenswrapper[4664]: E1013 07:07:22.386382 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:22 crc kubenswrapper[4664]: E1013 07:07:22.386464 4664 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.627994 4664 generic.go:334] "Generic (PLEG): container finished" podID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerID="83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0" exitCode=143 Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.628422 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerDied","Data":"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0"} Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.631274 4664 generic.go:334] "Generic (PLEG): container finished" podID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerID="96e5783a66f4ad97c7bf1aa2f552f1c19a60610d7878121bc7bb454cf5ff7fdb" exitCode=0 Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.631346 4664 generic.go:334] "Generic (PLEG): container finished" podID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerID="219657d35265ae27d0314817dbadaea001c1e679cdb8838d0b5dbc9006bc9ff0" exitCode=143 Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.632563 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerDied","Data":"96e5783a66f4ad97c7bf1aa2f552f1c19a60610d7878121bc7bb454cf5ff7fdb"} Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.632644 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerDied","Data":"219657d35265ae27d0314817dbadaea001c1e679cdb8838d0b5dbc9006bc9ff0"} Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.732841 4664 util.go:48] "No ready sandbox for pod can be found. 
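
[editor's note] The finished-container events just above carry two different exit codes: 0 for nova-api-api (clean shutdown) and 143 for the log sidecars. 143 is the conventional 128+N encoding of death by signal, here 128+15, SIGTERM, delivered at the start of the 30s grace period. A small decoder of that convention (the convention is standard shell/runtime practice, the helper itself is illustrative):

```go
package main

import (
	"fmt"
	"syscall"
)

// describeExit decodes the 128+N signal convention:
// 143 -> SIGTERM (15), 137 -> SIGKILL (9); 0 is a clean exit.
func describeExit(code int) string {
	switch {
	case code == 0:
		return "clean exit"
	case code > 128:
		return fmt.Sprintf("killed by signal %d (%v)", code-128, syscall.Signal(code-128))
	default:
		return fmt.Sprintf("error exit %d", code)
	}
}

func main() {
	for _, c := range []int{0, 143} { // the exit codes from the events above
		fmt.Printf("%3d: %s\n", c, describeExit(c))
	}
}
```
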
Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845431 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845477 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845606 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845650 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845693 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.845762 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grlnf\" (UniqueName: \"kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf\") pod \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\" (UID: \"0a0adbf1-c9ff-4373-8531-adc3bd33102b\") " Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.846297 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs" (OuterVolumeSpecName: "logs") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.846906 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a0adbf1-c9ff-4373-8531-adc3bd33102b-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.883053 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf" (OuterVolumeSpecName: "kube-api-access-grlnf") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "kube-api-access-grlnf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.890080 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.920915 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.931421 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data" (OuterVolumeSpecName: "config-data") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.950785 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grlnf\" (UniqueName: \"kubernetes.io/projected/0a0adbf1-c9ff-4373-8531-adc3bd33102b-kube-api-access-grlnf\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.950834 4664 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.950847 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.950857 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:22 crc kubenswrapper[4664]: I1013 07:07:22.964732 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0a0adbf1-c9ff-4373-8531-adc3bd33102b" (UID: "0a0adbf1-c9ff-4373-8531-adc3bd33102b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.052427 4664 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a0adbf1-c9ff-4373-8531-adc3bd33102b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.643500 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.643503 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a0adbf1-c9ff-4373-8531-adc3bd33102b","Type":"ContainerDied","Data":"47d162b7a9751cf9d24e13ca13515517ba27bac5bb1a8ae09bf53eba5b23c52d"} Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.643556 4664 scope.go:117] "RemoveContainer" containerID="96e5783a66f4ad97c7bf1aa2f552f1c19a60610d7878121bc7bb454cf5ff7fdb" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.671581 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.681662 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.703748 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:23 crc kubenswrapper[4664]: E1013 07:07:23.704389 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-log" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.704498 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-log" Oct 13 07:07:23 crc kubenswrapper[4664]: E1013 07:07:23.704610 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="dnsmasq-dns" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.704703 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="dnsmasq-dns" Oct 13 07:07:23 crc kubenswrapper[4664]: E1013 07:07:23.704784 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-api" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.704868 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-api" Oct 13 07:07:23 crc kubenswrapper[4664]: E1013 07:07:23.704932 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98cb4b7a-e3b0-496a-8b4c-71c78b879e44" containerName="nova-manage" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.704978 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="98cb4b7a-e3b0-496a-8b4c-71c78b879e44" containerName="nova-manage" Oct 13 07:07:23 crc kubenswrapper[4664]: E1013 07:07:23.705032 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="init" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.705093 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="init" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.705350 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="98cb4b7a-e3b0-496a-8b4c-71c78b879e44" containerName="nova-manage" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.705450 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-log" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.705501 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" containerName="nova-api-api" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 
07:07:23.705561 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cb0e2b8-6d81-49b4-850e-8aa72b02c626" containerName="dnsmasq-dns" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.715880 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.719425 4664 scope.go:117] "RemoveContainer" containerID="219657d35265ae27d0314817dbadaea001c1e679cdb8838d0b5dbc9006bc9ff0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.722175 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.722371 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.722724 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.727561 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.778781 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-config-data\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.778831 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-public-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.778866 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.778935 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-logs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.778959 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp7mc\" (UniqueName: \"kubernetes.io/projected/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-kube-api-access-pp7mc\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.779013 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880060 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-config-data\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880099 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-public-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880133 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880193 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-logs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880216 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp7mc\" (UniqueName: \"kubernetes.io/projected/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-kube-api-access-pp7mc\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880251 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.880865 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-logs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.889022 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.896402 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.902390 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-public-tls-certs\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.905375 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp7mc\" (UniqueName: 
\"kubernetes.io/projected/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-kube-api-access-pp7mc\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:23 crc kubenswrapper[4664]: I1013 07:07:23.919933 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769-config-data\") pod \"nova-api-0\" (UID: \"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769\") " pod="openstack/nova-api-0" Oct 13 07:07:24 crc kubenswrapper[4664]: I1013 07:07:24.085677 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 13 07:07:24 crc kubenswrapper[4664]: I1013 07:07:24.584186 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 13 07:07:24 crc kubenswrapper[4664]: W1013 07:07:24.592902 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0e3dfbe_5a9a_49f7_b6eb_ff53f02cb769.slice/crio-08bfa97f1e5e5a97168dd6a251bde5a8159d7c30840b52ab18bfa2f85182f54d WatchSource:0}: Error finding container 08bfa97f1e5e5a97168dd6a251bde5a8159d7c30840b52ab18bfa2f85182f54d: Status 404 returned error can't find the container with id 08bfa97f1e5e5a97168dd6a251bde5a8159d7c30840b52ab18bfa2f85182f54d Oct 13 07:07:24 crc kubenswrapper[4664]: I1013 07:07:24.653873 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769","Type":"ContainerStarted","Data":"08bfa97f1e5e5a97168dd6a251bde5a8159d7c30840b52ab18bfa2f85182f54d"} Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.003637 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:33668->10.217.0.207:8775: read: connection reset by peer" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.004740 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:33680->10.217.0.207:8775: read: connection reset by peer" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.070339 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0adbf1-c9ff-4373-8531-adc3bd33102b" path="/var/lib/kubelet/pods/0a0adbf1-c9ff-4373-8531-adc3bd33102b/volumes" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.532574 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.622119 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs\") pod \"43ce84a4-4ba5-4221-803a-af82eec6a563\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.622446 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs\") pod \"43ce84a4-4ba5-4221-803a-af82eec6a563\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.622488 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle\") pod \"43ce84a4-4ba5-4221-803a-af82eec6a563\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.622555 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkbh4\" (UniqueName: \"kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4\") pod \"43ce84a4-4ba5-4221-803a-af82eec6a563\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.622590 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data\") pod \"43ce84a4-4ba5-4221-803a-af82eec6a563\" (UID: \"43ce84a4-4ba5-4221-803a-af82eec6a563\") " Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.627295 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs" (OuterVolumeSpecName: "logs") pod "43ce84a4-4ba5-4221-803a-af82eec6a563" (UID: "43ce84a4-4ba5-4221-803a-af82eec6a563"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.647641 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4" (OuterVolumeSpecName: "kube-api-access-zkbh4") pod "43ce84a4-4ba5-4221-803a-af82eec6a563" (UID: "43ce84a4-4ba5-4221-803a-af82eec6a563"). InnerVolumeSpecName "kube-api-access-zkbh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.675334 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data" (OuterVolumeSpecName: "config-data") pod "43ce84a4-4ba5-4221-803a-af82eec6a563" (UID: "43ce84a4-4ba5-4221-803a-af82eec6a563"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.686391 4664 generic.go:334] "Generic (PLEG): container finished" podID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerID="a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8" exitCode=0 Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.686477 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerDied","Data":"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8"} Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.686505 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"43ce84a4-4ba5-4221-803a-af82eec6a563","Type":"ContainerDied","Data":"4615143b34c2db742aa2eb2568613898b13169140bc17fd60b22f6b16fd4ec83"} Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.686521 4664 scope.go:117] "RemoveContainer" containerID="a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.686748 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.701060 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769","Type":"ContainerStarted","Data":"2b6e60e394cef23339959c3ab2162df1a3f5b95369e3b80c55781b0984f0a77e"} Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.701099 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769","Type":"ContainerStarted","Data":"e02a40c81aec0d58222a8f7fbcf86c5ff6152921f4e1250eeeb185e0c7326b9d"} Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.719326 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43ce84a4-4ba5-4221-803a-af82eec6a563" (UID: "43ce84a4-4ba5-4221-803a-af82eec6a563"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.729991 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.730027 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkbh4\" (UniqueName: \"kubernetes.io/projected/43ce84a4-4ba5-4221-803a-af82eec6a563-kube-api-access-zkbh4\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.730041 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.730055 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43ce84a4-4ba5-4221-803a-af82eec6a563-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.740443 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.740418936 podStartE2EDuration="2.740418936s" podCreationTimestamp="2025-10-13 07:07:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:25.731065156 +0000 UTC m=+1253.418510348" watchObservedRunningTime="2025-10-13 07:07:25.740418936 +0000 UTC m=+1253.427864118" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.743640 4664 scope.go:117] "RemoveContainer" containerID="83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.772035 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "43ce84a4-4ba5-4221-803a-af82eec6a563" (UID: "43ce84a4-4ba5-4221-803a-af82eec6a563"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.773039 4664 scope.go:117] "RemoveContainer" containerID="a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8" Oct 13 07:07:25 crc kubenswrapper[4664]: E1013 07:07:25.773446 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8\": container with ID starting with a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8 not found: ID does not exist" containerID="a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.773473 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8"} err="failed to get container status \"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8\": rpc error: code = NotFound desc = could not find container \"a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8\": container with ID starting with a9caf49d1706641c446552451c8735eed8d8a6c11c575847b1b4658730eac9b8 not found: ID does not exist" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.773492 4664 scope.go:117] "RemoveContainer" containerID="83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0" Oct 13 07:07:25 crc kubenswrapper[4664]: E1013 07:07:25.773970 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0\": container with ID starting with 83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0 not found: ID does not exist" containerID="83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.773991 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0"} err="failed to get container status \"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0\": rpc error: code = NotFound desc = could not find container \"83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0\": container with ID starting with 83d3924b81898190e0f7b8fda29ae1f545863f0eb46d0bcfdd6f99f8ab2978f0 not found: ID does not exist" Oct 13 07:07:25 crc kubenswrapper[4664]: I1013 07:07:25.831788 4664 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/43ce84a4-4ba5-4221-803a-af82eec6a563-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.026001 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.037416 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.059308 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:26 crc kubenswrapper[4664]: E1013 07:07:26.059743 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.059760 4664 
state_mem.go:107] "Deleted CPUSet assignment" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" Oct 13 07:07:26 crc kubenswrapper[4664]: E1013 07:07:26.059774 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.059781 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.059998 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-log" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.060031 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" containerName="nova-metadata-metadata" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.061014 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.063400 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.065048 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.076600 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.138189 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7276f420-9ae8-46b9-a55f-903114d2f25c-logs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.138537 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-config-data\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.138593 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xtzr\" (UniqueName: \"kubernetes.io/projected/7276f420-9ae8-46b9-a55f-903114d2f25c-kube-api-access-9xtzr\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.138720 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.138971 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc 
kubenswrapper[4664]: I1013 07:07:26.240453 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-config-data\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.240489 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xtzr\" (UniqueName: \"kubernetes.io/projected/7276f420-9ae8-46b9-a55f-903114d2f25c-kube-api-access-9xtzr\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.240522 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.240589 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.240617 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7276f420-9ae8-46b9-a55f-903114d2f25c-logs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.240966 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7276f420-9ae8-46b9-a55f-903114d2f25c-logs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.257437 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.257478 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.258100 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7276f420-9ae8-46b9-a55f-903114d2f25c-config-data\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.271616 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xtzr\" (UniqueName: \"kubernetes.io/projected/7276f420-9ae8-46b9-a55f-903114d2f25c-kube-api-access-9xtzr\") pod \"nova-metadata-0\" (UID: \"7276f420-9ae8-46b9-a55f-903114d2f25c\") " 
pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.400167 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 13 07:07:26 crc kubenswrapper[4664]: I1013 07:07:26.886641 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 13 07:07:26 crc kubenswrapper[4664]: W1013 07:07:26.887915 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7276f420_9ae8_46b9_a55f_903114d2f25c.slice/crio-eebbb260949333472a796e3b95b28d431e1029e4f25a9c50118aaac579905198 WatchSource:0}: Error finding container eebbb260949333472a796e3b95b28d431e1029e4f25a9c50118aaac579905198: Status 404 returned error can't find the container with id eebbb260949333472a796e3b95b28d431e1029e4f25a9c50118aaac579905198 Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.058043 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43ce84a4-4ba5-4221-803a-af82eec6a563" path="/var/lib/kubelet/pods/43ce84a4-4ba5-4221-803a-af82eec6a563/volumes" Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.369726 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 is running failed: container process not found" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.370531 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 is running failed: container process not found" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.370811 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 is running failed: container process not found" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.370846 4664 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.522812 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.567882 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26rtj\" (UniqueName: \"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") pod \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.568074 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data\") pod \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.568169 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle\") pod \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\" (UID: \"bb8d5a94-642c-4dff-8ace-ebd7c844bb76\") " Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.581257 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj" (OuterVolumeSpecName: "kube-api-access-26rtj") pod "bb8d5a94-642c-4dff-8ace-ebd7c844bb76" (UID: "bb8d5a94-642c-4dff-8ace-ebd7c844bb76"). InnerVolumeSpecName "kube-api-access-26rtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.607089 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb8d5a94-642c-4dff-8ace-ebd7c844bb76" (UID: "bb8d5a94-642c-4dff-8ace-ebd7c844bb76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.621881 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data" (OuterVolumeSpecName: "config-data") pod "bb8d5a94-642c-4dff-8ace-ebd7c844bb76" (UID: "bb8d5a94-642c-4dff-8ace-ebd7c844bb76"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.670320 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.670369 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.670391 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26rtj\" (UniqueName: \"kubernetes.io/projected/bb8d5a94-642c-4dff-8ace-ebd7c844bb76-kube-api-access-26rtj\") on node \"crc\" DevicePath \"\"" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.723071 4664 generic.go:334] "Generic (PLEG): container finished" podID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" exitCode=0 Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.723098 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.723130 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bb8d5a94-642c-4dff-8ace-ebd7c844bb76","Type":"ContainerDied","Data":"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75"} Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.723163 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bb8d5a94-642c-4dff-8ace-ebd7c844bb76","Type":"ContainerDied","Data":"41d54a52b346121453c2954a0e69d7e5278680a774c0dc3cac3bc4266f5ca3c9"} Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.723208 4664 scope.go:117] "RemoveContainer" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.729595 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7276f420-9ae8-46b9-a55f-903114d2f25c","Type":"ContainerStarted","Data":"d42289a4dd78afd4fcbfcc75b0f6849e155765b347284234115fb12b01414d6e"} Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.729961 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7276f420-9ae8-46b9-a55f-903114d2f25c","Type":"ContainerStarted","Data":"5cb603d19e175f0c0d56c4095c9d40522635fccfef26672a4f64e12d3497f7fa"} Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.729973 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7276f420-9ae8-46b9-a55f-903114d2f25c","Type":"ContainerStarted","Data":"eebbb260949333472a796e3b95b28d431e1029e4f25a9c50118aaac579905198"} Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.752309 4664 scope.go:117] "RemoveContainer" containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.752618 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75\": container with ID starting with b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 not found: ID does not exist" 
containerID="b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.752644 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75"} err="failed to get container status \"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75\": rpc error: code = NotFound desc = could not find container \"b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75\": container with ID starting with b77751d4bf5164e492361468096948b0a5cdee2a8dcf3c68a7b624b5b7e28e75 not found: ID does not exist" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.755462 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.755421165 podStartE2EDuration="1.755421165s" podCreationTimestamp="2025-10-13 07:07:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:27.744195725 +0000 UTC m=+1255.431640927" watchObservedRunningTime="2025-10-13 07:07:27.755421165 +0000 UTC m=+1255.442866357" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.774354 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.786347 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.792717 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:27 crc kubenswrapper[4664]: E1013 07:07:27.793297 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.793382 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.793677 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" containerName="nova-scheduler-scheduler" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.794504 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.796959 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.800857 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.873831 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.874098 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcnhh\" (UniqueName: \"kubernetes.io/projected/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-kube-api-access-jcnhh\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.874249 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.975603 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.975675 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcnhh\" (UniqueName: \"kubernetes.io/projected/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-kube-api-access-jcnhh\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.975731 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.979783 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.980506 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:27 crc kubenswrapper[4664]: I1013 07:07:27.995392 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcnhh\" (UniqueName: 
\"kubernetes.io/projected/b4ae1d1c-38d6-4893-955d-c0d736f6b32f-kube-api-access-jcnhh\") pod \"nova-scheduler-0\" (UID: \"b4ae1d1c-38d6-4893-955d-c0d736f6b32f\") " pod="openstack/nova-scheduler-0" Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.115332 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.635081 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 13 07:07:28 crc kubenswrapper[4664]: W1013 07:07:28.636568 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4ae1d1c_38d6_4893_955d_c0d736f6b32f.slice/crio-ae0c653b18b74b982cfd6361af367f5709b1d3a076d2c42fd568718ff1845284 WatchSource:0}: Error finding container ae0c653b18b74b982cfd6361af367f5709b1d3a076d2c42fd568718ff1845284: Status 404 returned error can't find the container with id ae0c653b18b74b982cfd6361af367f5709b1d3a076d2c42fd568718ff1845284 Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.745564 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b4ae1d1c-38d6-4893-955d-c0d736f6b32f","Type":"ContainerStarted","Data":"ae0c653b18b74b982cfd6361af367f5709b1d3a076d2c42fd568718ff1845284"} Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.813088 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.813182 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.813250 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.814233 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:07:28 crc kubenswrapper[4664]: I1013 07:07:28.814327 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4" gracePeriod=600 Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.059931 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb8d5a94-642c-4dff-8ace-ebd7c844bb76" path="/var/lib/kubelet/pods/bb8d5a94-642c-4dff-8ace-ebd7c844bb76/volumes" Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.760201 4664 generic.go:334] "Generic (PLEG): container finished" 
podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4" exitCode=0 Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.760253 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4"} Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.760332 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad"} Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.760366 4664 scope.go:117] "RemoveContainer" containerID="90ae6fb696be24e0d495b69a3864f01bdb0ba4e52bf052b9a768126a14f3c7e1" Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.763185 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b4ae1d1c-38d6-4893-955d-c0d736f6b32f","Type":"ContainerStarted","Data":"88ee2e936b936bfe53f31bf4563dd8720c4dd2f04c2536a8699ff201fc6822d1"} Oct 13 07:07:29 crc kubenswrapper[4664]: I1013 07:07:29.814026 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.814006419 podStartE2EDuration="2.814006419s" podCreationTimestamp="2025-10-13 07:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:07:29.808201414 +0000 UTC m=+1257.495646646" watchObservedRunningTime="2025-10-13 07:07:29.814006419 +0000 UTC m=+1257.501451641" Oct 13 07:07:31 crc kubenswrapper[4664]: I1013 07:07:31.400458 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 13 07:07:31 crc kubenswrapper[4664]: I1013 07:07:31.401756 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 13 07:07:33 crc kubenswrapper[4664]: I1013 07:07:33.116086 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 13 07:07:34 crc kubenswrapper[4664]: I1013 07:07:34.086698 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 13 07:07:34 crc kubenswrapper[4664]: I1013 07:07:34.086751 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 13 07:07:35 crc kubenswrapper[4664]: I1013 07:07:35.101914 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.218:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:07:35 crc kubenswrapper[4664]: I1013 07:07:35.101914 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.218:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:07:36 crc kubenswrapper[4664]: I1013 07:07:36.401185 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-metadata-0" Oct 13 07:07:36 crc kubenswrapper[4664]: I1013 07:07:36.402889 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 13 07:07:37 crc kubenswrapper[4664]: I1013 07:07:37.415965 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7276f420-9ae8-46b9-a55f-903114d2f25c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.219:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:07:37 crc kubenswrapper[4664]: I1013 07:07:37.416725 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7276f420-9ae8-46b9-a55f-903114d2f25c" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.219:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:07:37 crc kubenswrapper[4664]: I1013 07:07:37.859270 4664 generic.go:334] "Generic (PLEG): container finished" podID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerID="c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6" exitCode=137 Oct 13 07:07:37 crc kubenswrapper[4664]: I1013 07:07:37.859326 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6"} Oct 13 07:07:37 crc kubenswrapper[4664]: I1013 07:07:37.859364 4664 scope.go:117] "RemoveContainer" containerID="952bdb7363dae79d4926e1ca1332d7aa4c3a95f0e8a9bb7e1c14b8d80cd4c504" Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.115952 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.155428 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.870731 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerStarted","Data":"657e1b5cae9fbe4bd1f09c723b2e6ad250d8d31ff00e3536e5c50a48bb0eca8c"} Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.874942 4664 generic.go:334] "Generic (PLEG): container finished" podID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerID="716b360bd426e69bd996488b830583fb95239ac8059eb566e7ac7b80470cc331" exitCode=137 Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.874999 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerDied","Data":"716b360bd426e69bd996488b830583fb95239ac8059eb566e7ac7b80470cc331"} Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.875044 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7d78c558d-rjg4v" event={"ID":"786f35fd-a7cc-4749-bc5e-47c28ffa4245","Type":"ContainerStarted","Data":"0d4163f33dad43c69e53a29241e854f38b2e1bb316a8bad5905dbd1df60b9c4e"} Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.875060 4664 scope.go:117] "RemoveContainer" containerID="3d2f032d21ef8d8ed4b549c11a26aedfe8563fd4c11924d540cf80427fd370c5" Oct 13 07:07:38 crc kubenswrapper[4664]: I1013 07:07:38.930755 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-scheduler-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.093004 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.095037 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.096181 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.102092 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.939062 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 13 07:07:44 crc kubenswrapper[4664]: I1013 07:07:44.945067 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 13 07:07:46 crc kubenswrapper[4664]: I1013 07:07:46.408375 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 13 07:07:46 crc kubenswrapper[4664]: I1013 07:07:46.411171 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 13 07:07:46 crc kubenswrapper[4664]: I1013 07:07:46.414779 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 13 07:07:46 crc kubenswrapper[4664]: I1013 07:07:46.973578 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 13 07:07:47 crc kubenswrapper[4664]: I1013 07:07:47.031845 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 13 07:07:47 crc kubenswrapper[4664]: I1013 07:07:47.739295 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:07:47 crc kubenswrapper[4664]: I1013 07:07:47.740834 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:07:47 crc kubenswrapper[4664]: I1013 07:07:47.870981 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:07:47 crc kubenswrapper[4664]: I1013 07:07:47.871808 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7d78c558d-rjg4v" Oct 13 07:07:57 crc kubenswrapper[4664]: I1013 07:07:57.740318 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:07:57 crc kubenswrapper[4664]: I1013 07:07:57.872185 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7d78c558d-rjg4v" podUID="786f35fd-a7cc-4749-bc5e-47c28ffa4245" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Oct 13 07:08:10 crc kubenswrapper[4664]: I1013 07:08:10.304767 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:08:10 crc kubenswrapper[4664]: 
I1013 07:08:10.337180 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7d78c558d-rjg4v"
Oct 13 07:08:12 crc kubenswrapper[4664]: I1013 07:08:12.139122 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-8487d6c5d4-cgnm9"
Oct 13 07:08:12 crc kubenswrapper[4664]: I1013 07:08:12.160108 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7d78c558d-rjg4v"
Oct 13 07:08:12 crc kubenswrapper[4664]: I1013 07:08:12.255771 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"]
Oct 13 07:08:12 crc kubenswrapper[4664]: I1013 07:08:12.270286 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon-log" containerID="cri-o://b3140382539f3da699fccec40fbcb27ac5fe0d631e911ac7ccdf3942dc8804fe" gracePeriod=30
Oct 13 07:08:12 crc kubenswrapper[4664]: I1013 07:08:12.270419 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" containerID="cri-o://657e1b5cae9fbe4bd1f09c723b2e6ad250d8d31ff00e3536e5c50a48bb0eca8c" gracePeriod=30
Oct 13 07:08:15 crc kubenswrapper[4664]: I1013 07:08:15.480188 4664 scope.go:117] "RemoveContainer" containerID="523a636f101b652576daab8c768617c1f39e3f5eb0cf5b8a26d8f4174b0873e5"
Oct 13 07:08:15 crc kubenswrapper[4664]: I1013 07:08:15.580895 4664 scope.go:117] "RemoveContainer" containerID="5a8965dffea5ccc8ebe9134d099376d1687efce75e9bcf15c5cf4690c48bce8a"
Oct 13 07:08:16 crc kubenswrapper[4664]: I1013 07:08:16.309123 4664 generic.go:334] "Generic (PLEG): container finished" podID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerID="657e1b5cae9fbe4bd1f09c723b2e6ad250d8d31ff00e3536e5c50a48bb0eca8c" exitCode=0
Oct 13 07:08:16 crc kubenswrapper[4664]: I1013 07:08:16.309183 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"657e1b5cae9fbe4bd1f09c723b2e6ad250d8d31ff00e3536e5c50a48bb0eca8c"}
Oct 13 07:08:16 crc kubenswrapper[4664]: I1013 07:08:16.309512 4664 scope.go:117] "RemoveContainer" containerID="c8df9bd385c5532e5d86f910ff25bb0dd8e4b604a40eb79ea6d7d648ffc05ad6"
Oct 13 07:08:17 crc kubenswrapper[4664]: I1013 07:08:17.739673 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused"
Oct 13 07:08:20 crc kubenswrapper[4664]: I1013 07:08:20.434344 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Oct 13 07:08:21 crc kubenswrapper[4664]: I1013 07:08:21.412388 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 13 07:08:25 crc kubenswrapper[4664]: I1013 07:08:25.012589 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="rabbitmq" containerID="cri-o://7de8814af1773315d641d3144577e78145e2036b7f885755ec4daf8d88b0c09d" gracePeriod=604796
Oct 13 07:08:25 crc kubenswrapper[4664]:
I1013 07:08:25.538983 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="rabbitmq" containerID="cri-o://2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41" gracePeriod=604796
Oct 13 07:08:26 crc kubenswrapper[4664]: I1013 07:08:26.598688 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused"
Oct 13 07:08:27 crc kubenswrapper[4664]: I1013 07:08:27.007427 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused"
Oct 13 07:08:27 crc kubenswrapper[4664]: I1013 07:08:27.739508 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused"
Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.482702 4664 generic.go:334] "Generic (PLEG): container finished" podID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerID="7de8814af1773315d641d3144577e78145e2036b7f885755ec4daf8d88b0c09d" exitCode=0
Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.483168 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerDied","Data":"7de8814af1773315d641d3144577e78145e2036b7f885755ec4daf8d88b0c09d"}
Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.807985 4664 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.939289 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd6x2\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.939337 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.939361 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.939393 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.941856 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.941955 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.942007 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.942065 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.942086 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.942171 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: 
\"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.942199 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf\") pod \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\" (UID: \"b3c7c8de-e6ed-440c-9f15-635fa77e35e7\") " Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.948885 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.948935 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.949917 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.951707 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.962151 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.963071 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2" (OuterVolumeSpecName: "kube-api-access-hd6x2") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "kube-api-access-hd6x2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.966189 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "local-storage12-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:08:31 crc kubenswrapper[4664]: I1013 07:08:31.968588 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info" (OuterVolumeSpecName: "pod-info") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.002391 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data" (OuterVolumeSpecName: "config-data") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.044215 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.044414 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045204 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045302 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd6x2\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-kube-api-access-hd6x2\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045630 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045720 4664 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045808 4664 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045903 4664 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-pod-info\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.045990 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.063223 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.077931 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.148533 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.148843 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.148920 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149007 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149071 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf9g2\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149136 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149195 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149328 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149433 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149553 4664 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.149649 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret\") pod \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\" (UID: \"180df95d-8f2f-4266-ae45-f9237a3aa1b7\") " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.150114 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.150653 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf" (OuterVolumeSpecName: "server-conf") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.152042 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.153206 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.159061 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.161424 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info" (OuterVolumeSpecName: "pod-info") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.163002 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.163527 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b3c7c8de-e6ed-440c-9f15-635fa77e35e7" (UID: "b3c7c8de-e6ed-440c-9f15-635fa77e35e7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.174414 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2" (OuterVolumeSpecName: "kube-api-access-gf9g2") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "kube-api-access-gf9g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.196332 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.201818 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.202284 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data" (OuterVolumeSpecName: "config-data") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252254 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252285 4664 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-server-conf\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252297 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252307 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3c7c8de-e6ed-440c-9f15-635fa77e35e7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252315 4664 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/180df95d-8f2f-4266-ae45-f9237a3aa1b7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252327 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252335 4664 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252343 4664 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/180df95d-8f2f-4266-ae45-f9237a3aa1b7-pod-info\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252351 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf9g2\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-kube-api-access-gf9g2\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252359 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.252390 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.279661 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.316996 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf" (OuterVolumeSpecName: "server-conf") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: 
"180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.323828 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "180df95d-8f2f-4266-ae45-f9237a3aa1b7" (UID: "180df95d-8f2f-4266-ae45-f9237a3aa1b7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.353554 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.353583 4664 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/180df95d-8f2f-4266-ae45-f9237a3aa1b7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.353592 4664 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/180df95d-8f2f-4266-ae45-f9237a3aa1b7-server-conf\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.494704 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b3c7c8de-e6ed-440c-9f15-635fa77e35e7","Type":"ContainerDied","Data":"749a588fdc08c9d7389a9420d82cb807ee46c20a987c6950d7028b88f7b0c32d"} Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.494780 4664 scope.go:117] "RemoveContainer" containerID="7de8814af1773315d641d3144577e78145e2036b7f885755ec4daf8d88b0c09d" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.494750 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.498318 4664 generic.go:334] "Generic (PLEG): container finished" podID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerID="2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41" exitCode=0 Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.498354 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerDied","Data":"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41"} Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.498376 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"180df95d-8f2f-4266-ae45-f9237a3aa1b7","Type":"ContainerDied","Data":"8777a4a620be9857a21e071cd5c8efbd1fefafb5e0cf37c44cf654df2579f137"} Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.498432 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.520024 4664 scope.go:117] "RemoveContainer" containerID="fa4dc12ff6be47dd21c58800f264181d51d1d13bcb68c135a914dab962750129" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.550497 4664 scope.go:117] "RemoveContainer" containerID="2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.556016 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.568580 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.601122 4664 scope.go:117] "RemoveContainer" containerID="02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.617129 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.623730 4664 scope.go:117] "RemoveContainer" containerID="2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41" Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.624255 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41\": container with ID starting with 2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41 not found: ID does not exist" containerID="2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.624295 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41"} err="failed to get container status \"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41\": rpc error: code = NotFound desc = could not find container \"2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41\": container with ID starting with 2480e994ac053607f7e653f9c474a4fc5512d57fe1b96a76515c4161367eaf41 not found: ID does not exist" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.624324 4664 scope.go:117] "RemoveContainer" containerID="02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07" Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.624628 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07\": container with ID starting with 02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07 not found: ID does not exist" containerID="02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.624676 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07"} err="failed to get container status \"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07\": rpc error: code = NotFound desc = could not find container \"02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07\": container with ID starting with 02fdabf122b567d57bb2f1559ae2b4bda5d6364fb03e9e8dab2891db59cbfb07 not found: ID does not exist" Oct 13 
07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.631841 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.632278 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632296 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.632316 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="setup-container" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632323 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="setup-container" Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.632347 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="setup-container" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632354 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="setup-container" Oct 13 07:08:32 crc kubenswrapper[4664]: E1013 07:08:32.632367 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632373 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632555 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.632589 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" containerName="rabbitmq" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.633739 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.640412 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.640697 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.640895 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-j24j9" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.641087 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.641178 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.641096 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.641426 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.658602 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.684393 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.686104 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.698743 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.701593 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.701848 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.701907 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xfxcb" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.701872 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.702099 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.702134 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.703502 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.715415 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762721 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbk4v\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-kube-api-access-qbk4v\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762778 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762840 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762861 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762876 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762907 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762953 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.762984 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e03819f-6d75-4465-908a-a3ab436ab132-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.763005 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e03819f-6d75-4465-908a-a3ab436ab132-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.763030 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.763050 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.865091 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbk4v\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-kube-api-access-qbk4v\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.865679 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.865856 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.866618 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.866718 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.866767 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.866831 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.866868 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " 
pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867328 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0c856173-8811-44d6-a72c-fd31966ef668-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867392 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-config-data\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867432 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0c856173-8811-44d6-a72c-fd31966ef668-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867470 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867537 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867597 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867663 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867706 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e03819f-6d75-4465-908a-a3ab436ab132-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867745 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867781 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e03819f-6d75-4465-908a-a3ab436ab132-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867861 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867900 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chclz\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-kube-api-access-chclz\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867952 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.867986 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.869158 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.869710 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.870240 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e03819f-6d75-4465-908a-a3ab436ab132-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.870361 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.870375 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.870564 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.876501 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e03819f-6d75-4465-908a-a3ab436ab132-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.876518 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e03819f-6d75-4465-908a-a3ab436ab132-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.877009 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.886584 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbk4v\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-kube-api-access-qbk4v\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.892381 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e03819f-6d75-4465-908a-a3ab436ab132-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.901936 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e03819f-6d75-4465-908a-a3ab436ab132\") " pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969499 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969838 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0c856173-8811-44d6-a72c-fd31966ef668-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " 
pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969862 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-config-data\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969884 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0c856173-8811-44d6-a72c-fd31966ef668-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969924 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969965 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.969986 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970014 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chclz\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-kube-api-access-chclz\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970033 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970086 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970105 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970588 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-plugins\") 
pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.970870 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.973104 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.974826 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.975090 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.976888 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0c856173-8811-44d6-a72c-fd31966ef668-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.977054 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-config-data\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.977913 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.978861 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0c856173-8811-44d6-a72c-fd31966ef668-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.979372 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0c856173-8811-44d6-a72c-fd31966ef668-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:32 crc kubenswrapper[4664]: I1013 07:08:32.993580 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chclz\" (UniqueName: 
\"kubernetes.io/projected/0c856173-8811-44d6-a72c-fd31966ef668-kube-api-access-chclz\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.003191 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.066311 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="180df95d-8f2f-4266-ae45-f9237a3aa1b7" path="/var/lib/kubelet/pods/180df95d-8f2f-4266-ae45-f9237a3aa1b7/volumes" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.069233 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3c7c8de-e6ed-440c-9f15-635fa77e35e7" path="/var/lib/kubelet/pods/b3c7c8de-e6ed-440c-9f15-635fa77e35e7/volumes" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.106749 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"0c856173-8811-44d6-a72c-fd31966ef668\") " pod="openstack/rabbitmq-server-0" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.323077 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xfxcb" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.330709 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.580492 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 13 07:08:33 crc kubenswrapper[4664]: W1013 07:08:33.776955 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c856173_8811_44d6_a72c_fd31966ef668.slice/crio-61a5d00599932e48dbad4172976aefb7b796b72b3df59a0e2067b43aa767d4d8 WatchSource:0}: Error finding container 61a5d00599932e48dbad4172976aefb7b796b72b3df59a0e2067b43aa767d4d8: Status 404 returned error can't find the container with id 61a5d00599932e48dbad4172976aefb7b796b72b3df59a0e2067b43aa767d4d8 Oct 13 07:08:33 crc kubenswrapper[4664]: I1013 07:08:33.778913 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 13 07:08:34 crc kubenswrapper[4664]: I1013 07:08:34.524167 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0c856173-8811-44d6-a72c-fd31966ef668","Type":"ContainerStarted","Data":"61a5d00599932e48dbad4172976aefb7b796b72b3df59a0e2067b43aa767d4d8"} Oct 13 07:08:34 crc kubenswrapper[4664]: I1013 07:08:34.525711 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e03819f-6d75-4465-908a-a3ab436ab132","Type":"ContainerStarted","Data":"15868fb6f35644a707a5888c52cfa6abcaf4e325c09e8280e9251183f7cd5f53"} Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.016127 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.017993 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.020479 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.042384 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123247 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123340 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123400 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123529 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcw57\" (UniqueName: \"kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123557 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.123643 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.225871 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" 
(UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.226901 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcw57\" (UniqueName: \"kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.226852 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227032 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227716 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227841 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227897 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227938 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.227980 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.228553 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " 
pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.229105 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.229573 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.230055 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.262749 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcw57\" (UniqueName: \"kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57\") pod \"dnsmasq-dns-67d44d59c5-zhq5k\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.341161 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.567572 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e03819f-6d75-4465-908a-a3ab436ab132","Type":"ContainerStarted","Data":"63bbc110fbb5cfee89d5b58460df6cf76d453ce13429430be9f2c453fb553f24"} Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.572971 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0c856173-8811-44d6-a72c-fd31966ef668","Type":"ContainerStarted","Data":"a9be8ae22653548c8e4f7c2fa3b8d8e6e798a57c122ebf38c87f485ea00ff264"} Oct 13 07:08:35 crc kubenswrapper[4664]: I1013 07:08:35.919873 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:36 crc kubenswrapper[4664]: I1013 07:08:36.589177 4664 generic.go:334] "Generic (PLEG): container finished" podID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerID="2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7" exitCode=0 Oct 13 07:08:36 crc kubenswrapper[4664]: I1013 07:08:36.591013 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" event={"ID":"83bd1d00-31be-4d3d-8d46-204e15e753be","Type":"ContainerDied","Data":"2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7"} Oct 13 07:08:36 crc kubenswrapper[4664]: I1013 07:08:36.591060 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" event={"ID":"83bd1d00-31be-4d3d-8d46-204e15e753be","Type":"ContainerStarted","Data":"e3508b24aa1ce5c1702b103deca9ec6884f63d00be5774eca9d533abd088b04a"} Oct 13 07:08:37 crc kubenswrapper[4664]: I1013 07:08:37.605377 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" event={"ID":"83bd1d00-31be-4d3d-8d46-204e15e753be","Type":"ContainerStarted","Data":"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7"} Oct 13 07:08:37 crc kubenswrapper[4664]: I1013 07:08:37.606176 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:37 crc kubenswrapper[4664]: I1013 07:08:37.636578 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" podStartSLOduration=3.636558067 podStartE2EDuration="3.636558067s" podCreationTimestamp="2025-10-13 07:08:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:08:37.631960765 +0000 UTC m=+1325.319405977" watchObservedRunningTime="2025-10-13 07:08:37.636558067 +0000 UTC m=+1325.324003259" Oct 13 07:08:37 crc kubenswrapper[4664]: I1013 07:08:37.739412 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8487d6c5d4-cgnm9" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Oct 13 07:08:37 crc kubenswrapper[4664]: I1013 07:08:37.739552 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.661071 4664 generic.go:334] "Generic (PLEG): container finished" podID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerID="b3140382539f3da699fccec40fbcb27ac5fe0d631e911ac7ccdf3942dc8804fe" exitCode=137 Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.661598 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"b3140382539f3da699fccec40fbcb27ac5fe0d631e911ac7ccdf3942dc8804fe"} Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.765226 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854014 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854165 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854195 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854238 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854341 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854369 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5vml\" (UniqueName: \"kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.854402 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle\") pod \"efaa5cce-79f5-4cdb-abf0-06b59765b776\" (UID: \"efaa5cce-79f5-4cdb-abf0-06b59765b776\") " Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.856713 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs" (OuterVolumeSpecName: "logs") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.871055 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.873067 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml" (OuterVolumeSpecName: "kube-api-access-f5vml") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "kube-api-access-f5vml". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.880178 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data" (OuterVolumeSpecName: "config-data") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.883559 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts" (OuterVolumeSpecName: "scripts") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.886408 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.924304 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "efaa5cce-79f5-4cdb-abf0-06b59765b776" (UID: "efaa5cce-79f5-4cdb-abf0-06b59765b776"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957831 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957881 4664 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efaa5cce-79f5-4cdb-abf0-06b59765b776-logs\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957901 4664 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957920 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efaa5cce-79f5-4cdb-abf0-06b59765b776-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957937 4664 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957952 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5vml\" (UniqueName: \"kubernetes.io/projected/efaa5cce-79f5-4cdb-abf0-06b59765b776-kube-api-access-f5vml\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:42 crc kubenswrapper[4664]: I1013 07:08:42.957969 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efaa5cce-79f5-4cdb-abf0-06b59765b776-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.677096 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8487d6c5d4-cgnm9" event={"ID":"efaa5cce-79f5-4cdb-abf0-06b59765b776","Type":"ContainerDied","Data":"62ce774e7b934179a542df7e968bd9ff21c78c5a8155f1f3c68bf9bd5d2955ee"} Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.677118 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8487d6c5d4-cgnm9" Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.677578 4664 scope.go:117] "RemoveContainer" containerID="657e1b5cae9fbe4bd1f09c723b2e6ad250d8d31ff00e3536e5c50a48bb0eca8c" Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.705536 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"] Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.712672 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8487d6c5d4-cgnm9"] Oct 13 07:08:43 crc kubenswrapper[4664]: I1013 07:08:43.858165 4664 scope.go:117] "RemoveContainer" containerID="b3140382539f3da699fccec40fbcb27ac5fe0d631e911ac7ccdf3942dc8804fe" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.059300 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" path="/var/lib/kubelet/pods/efaa5cce-79f5-4cdb-abf0-06b59765b776/volumes" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.343990 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.427869 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.428185 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="dnsmasq-dns" containerID="cri-o://a1fd38c1fa7771421a3d3dd7cbb37829c0170768844388f4674115d543e52eeb" gracePeriod=10 Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.626366 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d8755c899-zzh4h"] Oct 13 07:08:45 crc kubenswrapper[4664]: E1013 07:08:45.626749 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon-log" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.626762 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon-log" Oct 13 07:08:45 crc kubenswrapper[4664]: E1013 07:08:45.626786 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.626808 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: E1013 07:08:45.626817 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.626823 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: E1013 07:08:45.626836 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.626843 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634211 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634250 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634267 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634282 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon-log" Oct 13 07:08:45 crc kubenswrapper[4664]: E1013 07:08:45.634496 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634507 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.634707 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="efaa5cce-79f5-4cdb-abf0-06b59765b776" containerName="horizon" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.635422 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.637563 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d8755c899-zzh4h"] Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.720225 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-nb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.720275 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-swift-storage-0\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.721879 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-sb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.721926 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrt68\" (UniqueName: \"kubernetes.io/projected/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-kube-api-access-hrt68\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.721972 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-svc\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: 
\"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.722003 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-config\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.722161 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-openstack-edpm-ipam\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.732380 4664 generic.go:334] "Generic (PLEG): container finished" podID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerID="a1fd38c1fa7771421a3d3dd7cbb37829c0170768844388f4674115d543e52eeb" exitCode=0 Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.732423 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" event={"ID":"3bc8906d-8312-44af-9ff5-eeac77de1533","Type":"ContainerDied","Data":"a1fd38c1fa7771421a3d3dd7cbb37829c0170768844388f4674115d543e52eeb"} Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.828885 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrt68\" (UniqueName: \"kubernetes.io/projected/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-kube-api-access-hrt68\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.828999 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-svc\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.829056 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-config\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.829093 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-openstack-edpm-ipam\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.829326 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-nb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.829371 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-swift-storage-0\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.829478 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-sb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.830110 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-config\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.830264 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-sb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.830404 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-svc\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.831000 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-ovsdbserver-nb\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.831075 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-openstack-edpm-ipam\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.833732 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-dns-swift-storage-0\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.859242 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrt68\" (UniqueName: \"kubernetes.io/projected/b5ff54ae-da3a-4f2a-9931-9cd0834e4672-kube-api-access-hrt68\") pod \"dnsmasq-dns-6d8755c899-zzh4h\" (UID: \"b5ff54ae-da3a-4f2a-9931-9cd0834e4672\") " pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.964373 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:45 crc kubenswrapper[4664]: I1013 07:08:45.988783 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.134917 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.135788 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.135937 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66248\" (UniqueName: \"kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.136022 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.136171 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.136259 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb\") pod \"3bc8906d-8312-44af-9ff5-eeac77de1533\" (UID: \"3bc8906d-8312-44af-9ff5-eeac77de1533\") " Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.148033 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248" (OuterVolumeSpecName: "kube-api-access-66248") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "kube-api-access-66248". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.218013 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.220396 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.229040 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.238510 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.238546 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.238561 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.238570 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66248\" (UniqueName: \"kubernetes.io/projected/3bc8906d-8312-44af-9ff5-eeac77de1533-kube-api-access-66248\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.239533 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config" (OuterVolumeSpecName: "config") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.250999 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3bc8906d-8312-44af-9ff5-eeac77de1533" (UID: "3bc8906d-8312-44af-9ff5-eeac77de1533"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.339833 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.339862 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3bc8906d-8312-44af-9ff5-eeac77de1533-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.515120 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d8755c899-zzh4h"] Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.743726 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" event={"ID":"3bc8906d-8312-44af-9ff5-eeac77de1533","Type":"ContainerDied","Data":"4d899e8add38a4603d0e0b155bbfa07f02c82d634e27057047d9affb650be909"} Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.743960 4664 scope.go:117] "RemoveContainer" containerID="a1fd38c1fa7771421a3d3dd7cbb37829c0170768844388f4674115d543e52eeb" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.743781 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b474f45dc-wqs97" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.745524 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" event={"ID":"b5ff54ae-da3a-4f2a-9931-9cd0834e4672","Type":"ContainerStarted","Data":"af5d0d07407ca5090bda11e7549d32a23b484f91ea003568a327b2a5be4b4ab1"} Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.779362 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.779508 4664 scope.go:117] "RemoveContainer" containerID="1d5e386958a20c575f8ba47ededb64716b2b2e274c4aae690e5c0b203fb1e1c4" Oct 13 07:08:46 crc kubenswrapper[4664]: I1013 07:08:46.790282 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b474f45dc-wqs97"] Oct 13 07:08:47 crc kubenswrapper[4664]: I1013 07:08:47.060896 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" path="/var/lib/kubelet/pods/3bc8906d-8312-44af-9ff5-eeac77de1533/volumes" Oct 13 07:08:47 crc kubenswrapper[4664]: I1013 07:08:47.762315 4664 generic.go:334] "Generic (PLEG): container finished" podID="b5ff54ae-da3a-4f2a-9931-9cd0834e4672" containerID="d9dc58f35dae646fc2a540df1852f260cbaf839500941cef510f17d6e4525780" exitCode=0 Oct 13 07:08:47 crc kubenswrapper[4664]: I1013 07:08:47.762362 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" event={"ID":"b5ff54ae-da3a-4f2a-9931-9cd0834e4672","Type":"ContainerDied","Data":"d9dc58f35dae646fc2a540df1852f260cbaf839500941cef510f17d6e4525780"} Oct 13 07:08:48 crc kubenswrapper[4664]: I1013 07:08:48.775547 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" event={"ID":"b5ff54ae-da3a-4f2a-9931-9cd0834e4672","Type":"ContainerStarted","Data":"17730c6b1ceab324d88bdbe7f86464e5e07a1f49d14419d8580c10d067b77291"} Oct 13 07:08:48 crc kubenswrapper[4664]: I1013 07:08:48.776742 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 
13 07:08:48 crc kubenswrapper[4664]: I1013 07:08:48.805897 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" podStartSLOduration=3.805876737 podStartE2EDuration="3.805876737s" podCreationTimestamp="2025-10-13 07:08:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:08:48.798470289 +0000 UTC m=+1336.485915491" watchObservedRunningTime="2025-10-13 07:08:48.805876737 +0000 UTC m=+1336.493321949" Oct 13 07:08:55 crc kubenswrapper[4664]: I1013 07:08:55.964968 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d8755c899-zzh4h" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.071369 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.071645 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerName="dnsmasq-dns" containerID="cri-o://f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7" gracePeriod=10 Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.591205 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.649714 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcw57\" (UniqueName: \"kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.650007 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.650152 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.650274 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.651286 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.651387 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: 
\"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.651475 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb\") pod \"83bd1d00-31be-4d3d-8d46-204e15e753be\" (UID: \"83bd1d00-31be-4d3d-8d46-204e15e753be\") " Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.677752 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57" (OuterVolumeSpecName: "kube-api-access-kcw57") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "kube-api-access-kcw57". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.708423 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.724093 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config" (OuterVolumeSpecName: "config") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.735702 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.741912 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.744453 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753781 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcw57\" (UniqueName: \"kubernetes.io/projected/83bd1d00-31be-4d3d-8d46-204e15e753be-kube-api-access-kcw57\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753827 4664 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753840 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753849 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753857 4664 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.753866 4664 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.755686 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "83bd1d00-31be-4d3d-8d46-204e15e753be" (UID: "83bd1d00-31be-4d3d-8d46-204e15e753be"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.855106 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/83bd1d00-31be-4d3d-8d46-204e15e753be-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.862519 4664 generic.go:334] "Generic (PLEG): container finished" podID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerID="f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7" exitCode=0 Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.862564 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" event={"ID":"83bd1d00-31be-4d3d-8d46-204e15e753be","Type":"ContainerDied","Data":"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7"} Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.862592 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" event={"ID":"83bd1d00-31be-4d3d-8d46-204e15e753be","Type":"ContainerDied","Data":"e3508b24aa1ce5c1702b103deca9ec6884f63d00be5774eca9d533abd088b04a"} Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.862608 4664 scope.go:117] "RemoveContainer" containerID="f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.862756 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d44d59c5-zhq5k" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.880495 4664 scope.go:117] "RemoveContainer" containerID="2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.902507 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.904483 4664 scope.go:117] "RemoveContainer" containerID="f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7" Oct 13 07:08:56 crc kubenswrapper[4664]: E1013 07:08:56.905007 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7\": container with ID starting with f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7 not found: ID does not exist" containerID="f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.905038 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7"} err="failed to get container status \"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7\": rpc error: code = NotFound desc = could not find container \"f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7\": container with ID starting with f1bf255895be9ae5b986ba7831ef294e21b28fefe81720744f416c07211b49e7 not found: ID does not exist" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.905068 4664 scope.go:117] "RemoveContainer" containerID="2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7" Oct 13 07:08:56 crc kubenswrapper[4664]: E1013 07:08:56.905329 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7\": container with ID starting with 2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7 not found: ID does not exist" containerID="2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.905412 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7"} err="failed to get container status \"2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7\": rpc error: code = NotFound desc = could not find container \"2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7\": container with ID starting with 2abac1751d83b3b68e7a536128a038d50a73807fb46580ce016f5bc1b53466c7 not found: ID does not exist" Oct 13 07:08:56 crc kubenswrapper[4664]: I1013 07:08:56.912400 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67d44d59c5-zhq5k"] Oct 13 07:08:57 crc kubenswrapper[4664]: I1013 07:08:57.057433 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" path="/var/lib/kubelet/pods/83bd1d00-31be-4d3d-8d46-204e15e753be/volumes" Oct 13 07:09:07 crc kubenswrapper[4664]: I1013 07:09:07.031492 4664 generic.go:334] "Generic (PLEG): container finished" podID="0c856173-8811-44d6-a72c-fd31966ef668" containerID="a9be8ae22653548c8e4f7c2fa3b8d8e6e798a57c122ebf38c87f485ea00ff264" exitCode=0 Oct 13 07:09:07 crc kubenswrapper[4664]: I1013 07:09:07.031966 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0c856173-8811-44d6-a72c-fd31966ef668","Type":"ContainerDied","Data":"a9be8ae22653548c8e4f7c2fa3b8d8e6e798a57c122ebf38c87f485ea00ff264"} Oct 13 07:09:08 crc kubenswrapper[4664]: I1013 07:09:08.042424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0c856173-8811-44d6-a72c-fd31966ef668","Type":"ContainerStarted","Data":"1bba1eb698048f509d8f6cbf8d5a669bfa6454d7e58a6c06412ab1e08069f6b3"} Oct 13 07:09:08 crc kubenswrapper[4664]: I1013 07:09:08.042956 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 13 07:09:08 crc kubenswrapper[4664]: I1013 07:09:08.044432 4664 generic.go:334] "Generic (PLEG): container finished" podID="8e03819f-6d75-4465-908a-a3ab436ab132" containerID="63bbc110fbb5cfee89d5b58460df6cf76d453ce13429430be9f2c453fb553f24" exitCode=0 Oct 13 07:09:08 crc kubenswrapper[4664]: I1013 07:09:08.044467 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e03819f-6d75-4465-908a-a3ab436ab132","Type":"ContainerDied","Data":"63bbc110fbb5cfee89d5b58460df6cf76d453ce13429430be9f2c453fb553f24"} Oct 13 07:09:08 crc kubenswrapper[4664]: I1013 07:09:08.082236 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.082217952 podStartE2EDuration="36.082217952s" podCreationTimestamp="2025-10-13 07:08:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:09:08.069585895 +0000 UTC m=+1355.757031097" watchObservedRunningTime="2025-10-13 07:09:08.082217952 +0000 UTC m=+1355.769663144" Oct 13 07:09:09 crc kubenswrapper[4664]: I1013 07:09:09.058726 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e03819f-6d75-4465-908a-a3ab436ab132","Type":"ContainerStarted","Data":"26a575922140c6abfa506402ed3686fd36191c4f9191aa1b8c35dbbe16fcc629"} Oct 13 07:09:09 crc kubenswrapper[4664]: I1013 07:09:09.059239 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:09:09 crc kubenswrapper[4664]: I1013 07:09:09.088367 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.08835143 podStartE2EDuration="37.08835143s" podCreationTimestamp="2025-10-13 07:08:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:09:09.079595756 +0000 UTC m=+1356.767040958" watchObservedRunningTime="2025-10-13 07:09:09.08835143 +0000 UTC m=+1356.775796622" Oct 13 07:09:15 crc kubenswrapper[4664]: I1013 07:09:15.724532 4664 scope.go:117] "RemoveContainer" containerID="65fc8909ea63a857d9cb64e8069c756e8fd98ffc81aee4dca7c70734a83161d4" Oct 13 07:09:15 crc kubenswrapper[4664]: I1013 07:09:15.752926 4664 scope.go:117] "RemoveContainer" containerID="d75eaeee258f07c74698fb46034db5e0bac8116ac1f3e4f3384ede45cb87b730" Oct 13 07:09:15 crc kubenswrapper[4664]: I1013 07:09:15.796785 4664 scope.go:117] "RemoveContainer" containerID="05f88fb8ca570b9d330a6650f1b32924384538b447ef0afbf12442e2a18233db" Oct 13 07:09:15 crc kubenswrapper[4664]: I1013 07:09:15.852395 4664 scope.go:117] "RemoveContainer" containerID="3b29012776a6a1433dba5f301f050d73da8f81d2d8e6def071afdb7a0cbb3828" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.558485 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq"] Oct 13 07:09:16 crc kubenswrapper[4664]: E1013 07:09:16.559493 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.559597 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: E1013 07:09:16.559744 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerName="init" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.559839 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerName="init" Oct 13 07:09:16 crc kubenswrapper[4664]: E1013 07:09:16.559938 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.560044 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: E1013 07:09:16.560128 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="init" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.560209 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="init" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.560661 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="83bd1d00-31be-4d3d-8d46-204e15e753be" 
containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.560777 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bc8906d-8312-44af-9ff5-eeac77de1533" containerName="dnsmasq-dns" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.561789 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.566464 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.566914 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.567232 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.567476 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq"] Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.567660 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.664408 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntnx6\" (UniqueName: \"kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.664467 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.664514 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.664567 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.766014 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: 
\"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.766138 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntnx6\" (UniqueName: \"kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.766176 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.766233 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.772107 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.774442 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.774632 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.811891 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntnx6\" (UniqueName: \"kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:16 crc kubenswrapper[4664]: I1013 07:09:16.882144 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:17 crc kubenswrapper[4664]: W1013 07:09:17.796025 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef7305a7_edd9_4023_a761_713e870e85ce.slice/crio-f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93 WatchSource:0}: Error finding container f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93: Status 404 returned error can't find the container with id f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93 Oct 13 07:09:17 crc kubenswrapper[4664]: I1013 07:09:17.815295 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq"] Oct 13 07:09:18 crc kubenswrapper[4664]: I1013 07:09:18.141534 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" event={"ID":"ef7305a7-edd9-4023-a761-713e870e85ce","Type":"ContainerStarted","Data":"f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93"} Oct 13 07:09:23 crc kubenswrapper[4664]: I1013 07:09:23.008022 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 13 07:09:23 crc kubenswrapper[4664]: I1013 07:09:23.334008 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 13 07:09:28 crc kubenswrapper[4664]: I1013 07:09:28.249746 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" event={"ID":"ef7305a7-edd9-4023-a761-713e870e85ce","Type":"ContainerStarted","Data":"407773bd3e746250c5b74fffc5e586d853214606a6e0b89cf1d5fbe178ec08ee"} Oct 13 07:09:28 crc kubenswrapper[4664]: I1013 07:09:28.272699 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" podStartSLOduration=2.459674327 podStartE2EDuration="12.272678385s" podCreationTimestamp="2025-10-13 07:09:16 +0000 UTC" firstStartedPulling="2025-10-13 07:09:17.800673274 +0000 UTC m=+1365.488118466" lastFinishedPulling="2025-10-13 07:09:27.613677282 +0000 UTC m=+1375.301122524" observedRunningTime="2025-10-13 07:09:28.265401501 +0000 UTC m=+1375.952846703" watchObservedRunningTime="2025-10-13 07:09:28.272678385 +0000 UTC m=+1375.960123587" Oct 13 07:09:39 crc kubenswrapper[4664]: I1013 07:09:39.392343 4664 generic.go:334] "Generic (PLEG): container finished" podID="ef7305a7-edd9-4023-a761-713e870e85ce" containerID="407773bd3e746250c5b74fffc5e586d853214606a6e0b89cf1d5fbe178ec08ee" exitCode=0 Oct 13 07:09:39 crc kubenswrapper[4664]: I1013 07:09:39.392484 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" event={"ID":"ef7305a7-edd9-4023-a761-713e870e85ce","Type":"ContainerDied","Data":"407773bd3e746250c5b74fffc5e586d853214606a6e0b89cf1d5fbe178ec08ee"} Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.856215 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.923453 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key\") pod \"ef7305a7-edd9-4023-a761-713e870e85ce\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.923775 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory\") pod \"ef7305a7-edd9-4023-a761-713e870e85ce\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.923959 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle\") pod \"ef7305a7-edd9-4023-a761-713e870e85ce\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.924227 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntnx6\" (UniqueName: \"kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6\") pod \"ef7305a7-edd9-4023-a761-713e870e85ce\" (UID: \"ef7305a7-edd9-4023-a761-713e870e85ce\") " Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.929991 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "ef7305a7-edd9-4023-a761-713e870e85ce" (UID: "ef7305a7-edd9-4023-a761-713e870e85ce"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.930924 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6" (OuterVolumeSpecName: "kube-api-access-ntnx6") pod "ef7305a7-edd9-4023-a761-713e870e85ce" (UID: "ef7305a7-edd9-4023-a761-713e870e85ce"). InnerVolumeSpecName "kube-api-access-ntnx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.968729 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory" (OuterVolumeSpecName: "inventory") pod "ef7305a7-edd9-4023-a761-713e870e85ce" (UID: "ef7305a7-edd9-4023-a761-713e870e85ce"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:09:40 crc kubenswrapper[4664]: I1013 07:09:40.972252 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ef7305a7-edd9-4023-a761-713e870e85ce" (UID: "ef7305a7-edd9-4023-a761-713e870e85ce"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.027196 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.027241 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.027256 4664 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7305a7-edd9-4023-a761-713e870e85ce-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.027273 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntnx6\" (UniqueName: \"kubernetes.io/projected/ef7305a7-edd9-4023-a761-713e870e85ce-kube-api-access-ntnx6\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.418236 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" event={"ID":"ef7305a7-edd9-4023-a761-713e870e85ce","Type":"ContainerDied","Data":"f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93"} Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.418284 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f541b2c9c051d7c2c4b8b88efded0ef5f836c7431904cfcdab080d5efb437c93" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.418347 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.545835 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6"] Oct 13 07:09:41 crc kubenswrapper[4664]: E1013 07:09:41.546316 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef7305a7-edd9-4023-a761-713e870e85ce" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.546337 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef7305a7-edd9-4023-a761-713e870e85ce" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.546578 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef7305a7-edd9-4023-a761-713e870e85ce" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.547337 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.549748 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.550013 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.550055 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.549910 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.573008 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6"] Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.644445 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.644506 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdhpg\" (UniqueName: \"kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.644625 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.747199 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdhpg\" (UniqueName: \"kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.747362 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.747625 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.754072 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.754720 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.778502 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdhpg\" (UniqueName: \"kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bvnz6\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:41 crc kubenswrapper[4664]: I1013 07:09:41.872911 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:42 crc kubenswrapper[4664]: I1013 07:09:42.492215 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6"] Oct 13 07:09:43 crc kubenswrapper[4664]: I1013 07:09:43.439870 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" event={"ID":"63b39d04-1c3c-4854-9de5-aba98a83f9a7","Type":"ContainerStarted","Data":"75c16535b92f776811d3dc3627d3ba60dbfb236f753ab2ded7ad70b8a1431930"} Oct 13 07:09:43 crc kubenswrapper[4664]: I1013 07:09:43.440227 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" event={"ID":"63b39d04-1c3c-4854-9de5-aba98a83f9a7","Type":"ContainerStarted","Data":"cde4bdb66f3f5d311235541c74d50100c861849f07b18b6ac976d17fcce14f3b"} Oct 13 07:09:43 crc kubenswrapper[4664]: I1013 07:09:43.461702 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" podStartSLOduration=1.859558335 podStartE2EDuration="2.461682715s" podCreationTimestamp="2025-10-13 07:09:41 +0000 UTC" firstStartedPulling="2025-10-13 07:09:42.509475633 +0000 UTC m=+1390.196920825" lastFinishedPulling="2025-10-13 07:09:43.111600003 +0000 UTC m=+1390.799045205" observedRunningTime="2025-10-13 07:09:43.459839326 +0000 UTC m=+1391.147284518" watchObservedRunningTime="2025-10-13 07:09:43.461682715 +0000 UTC m=+1391.149127917" Oct 13 07:09:46 crc kubenswrapper[4664]: I1013 07:09:46.478095 4664 generic.go:334] "Generic (PLEG): container finished" podID="63b39d04-1c3c-4854-9de5-aba98a83f9a7" containerID="75c16535b92f776811d3dc3627d3ba60dbfb236f753ab2ded7ad70b8a1431930" exitCode=0 Oct 13 07:09:46 crc kubenswrapper[4664]: I1013 07:09:46.478204 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" 
event={"ID":"63b39d04-1c3c-4854-9de5-aba98a83f9a7","Type":"ContainerDied","Data":"75c16535b92f776811d3dc3627d3ba60dbfb236f753ab2ded7ad70b8a1431930"} Oct 13 07:09:47 crc kubenswrapper[4664]: I1013 07:09:47.978992 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.127485 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdhpg\" (UniqueName: \"kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg\") pod \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.127596 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory\") pod \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.128321 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key\") pod \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\" (UID: \"63b39d04-1c3c-4854-9de5-aba98a83f9a7\") " Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.137585 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg" (OuterVolumeSpecName: "kube-api-access-fdhpg") pod "63b39d04-1c3c-4854-9de5-aba98a83f9a7" (UID: "63b39d04-1c3c-4854-9de5-aba98a83f9a7"). InnerVolumeSpecName "kube-api-access-fdhpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.163105 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "63b39d04-1c3c-4854-9de5-aba98a83f9a7" (UID: "63b39d04-1c3c-4854-9de5-aba98a83f9a7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.190056 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory" (OuterVolumeSpecName: "inventory") pod "63b39d04-1c3c-4854-9de5-aba98a83f9a7" (UID: "63b39d04-1c3c-4854-9de5-aba98a83f9a7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.231360 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.231401 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/63b39d04-1c3c-4854-9de5-aba98a83f9a7-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.231414 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdhpg\" (UniqueName: \"kubernetes.io/projected/63b39d04-1c3c-4854-9de5-aba98a83f9a7-kube-api-access-fdhpg\") on node \"crc\" DevicePath \"\"" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.508147 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" event={"ID":"63b39d04-1c3c-4854-9de5-aba98a83f9a7","Type":"ContainerDied","Data":"cde4bdb66f3f5d311235541c74d50100c861849f07b18b6ac976d17fcce14f3b"} Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.508200 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cde4bdb66f3f5d311235541c74d50100c861849f07b18b6ac976d17fcce14f3b" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.508259 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bvnz6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.637890 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6"] Oct 13 07:09:48 crc kubenswrapper[4664]: E1013 07:09:48.638277 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63b39d04-1c3c-4854-9de5-aba98a83f9a7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.638296 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="63b39d04-1c3c-4854-9de5-aba98a83f9a7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.638519 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="63b39d04-1c3c-4854-9de5-aba98a83f9a7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.639168 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.650317 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.650553 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.650999 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.651615 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.658372 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6"] Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.750581 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6gt6\" (UniqueName: \"kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.750673 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.750703 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.751069 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.853154 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.853306 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6gt6\" (UniqueName: \"kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.853362 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.853394 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.862133 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.864228 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.866251 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.888408 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6gt6\" (UniqueName: \"kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:48 crc kubenswrapper[4664]: I1013 07:09:48.967889 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:09:49 crc kubenswrapper[4664]: I1013 07:09:49.526733 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6"] Oct 13 07:09:50 crc kubenswrapper[4664]: I1013 07:09:50.526500 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" event={"ID":"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1","Type":"ContainerStarted","Data":"060b6d199edb52b9ce68dc321d9d80b8af42174b07162e5a4c7e2f4f1db75658"} Oct 13 07:09:50 crc kubenswrapper[4664]: I1013 07:09:50.527111 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" event={"ID":"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1","Type":"ContainerStarted","Data":"21f349b0dad2c4317593c60b6515ded11eaf8b7c6907fdc06cb25ae80eaf1bd8"} Oct 13 07:09:50 crc kubenswrapper[4664]: I1013 07:09:50.542541 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" podStartSLOduration=2.029179148 podStartE2EDuration="2.54252222s" podCreationTimestamp="2025-10-13 07:09:48 +0000 UTC" firstStartedPulling="2025-10-13 07:09:49.530897978 +0000 UTC m=+1397.218343200" lastFinishedPulling="2025-10-13 07:09:50.04424107 +0000 UTC m=+1397.731686272" observedRunningTime="2025-10-13 07:09:50.538977925 +0000 UTC m=+1398.226423137" watchObservedRunningTime="2025-10-13 07:09:50.54252222 +0000 UTC m=+1398.229967412" Oct 13 07:09:58 crc kubenswrapper[4664]: I1013 07:09:58.811593 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:09:58 crc kubenswrapper[4664]: I1013 07:09:58.812247 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:10:16 crc kubenswrapper[4664]: I1013 07:10:16.094642 4664 scope.go:117] "RemoveContainer" containerID="2b1ae8f01cb859ff0e8a1bde428582e5cc338daa0e0312d2cab0a0a1097c8712" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.026996 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pt8xd"] Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.030521 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.045120 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pt8xd"] Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.087850 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-utilities\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.087941 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-catalog-content\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.088205 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbktq\" (UniqueName: \"kubernetes.io/projected/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-kube-api-access-lbktq\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.189932 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-catalog-content\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.190612 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbktq\" (UniqueName: \"kubernetes.io/projected/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-kube-api-access-lbktq\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.191108 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-utilities\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.191199 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-catalog-content\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.191373 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-utilities\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.210393 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lbktq\" (UniqueName: \"kubernetes.io/projected/ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8-kube-api-access-lbktq\") pod \"redhat-operators-pt8xd\" (UID: \"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8\") " pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.405563 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.880493 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pt8xd"] Oct 13 07:10:26 crc kubenswrapper[4664]: I1013 07:10:26.954555 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt8xd" event={"ID":"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8","Type":"ContainerStarted","Data":"1ea00cde5ace4036c59a8bf26d9ade8da2eafae652812fd060c9668eb76d436e"} Oct 13 07:10:27 crc kubenswrapper[4664]: I1013 07:10:27.964841 4664 generic.go:334] "Generic (PLEG): container finished" podID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerID="ba92a69381b92b360d18e776a539b80016caab0f495521d480f767dfff8d0450" exitCode=0 Oct 13 07:10:27 crc kubenswrapper[4664]: I1013 07:10:27.964920 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt8xd" event={"ID":"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8","Type":"ContainerDied","Data":"ba92a69381b92b360d18e776a539b80016caab0f495521d480f767dfff8d0450"} Oct 13 07:10:28 crc kubenswrapper[4664]: I1013 07:10:28.813026 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:10:28 crc kubenswrapper[4664]: I1013 07:10:28.813094 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.473208 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.480119 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.484503 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.484671 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.484716 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wqrv\" (UniqueName: \"kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.489288 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.586451 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.587044 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wqrv\" (UniqueName: \"kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.587124 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.587212 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.587710 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.611968 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7wqrv\" (UniqueName: \"kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv\") pod \"redhat-marketplace-bx269\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:37 crc kubenswrapper[4664]: I1013 07:10:37.823743 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:38 crc kubenswrapper[4664]: I1013 07:10:38.058987 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt8xd" event={"ID":"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8","Type":"ContainerStarted","Data":"07894916758d5f2b53762d44c32f33eeed80ab0bd07b8509730d29c95dc7eca3"} Oct 13 07:10:38 crc kubenswrapper[4664]: I1013 07:10:38.749728 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:39 crc kubenswrapper[4664]: I1013 07:10:39.070172 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerStarted","Data":"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929"} Oct 13 07:10:39 crc kubenswrapper[4664]: I1013 07:10:39.070534 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerStarted","Data":"80bd0410738624105bd2a1c71f726de683747b2b43ce32f6cc4322783b24e0db"} Oct 13 07:10:40 crc kubenswrapper[4664]: I1013 07:10:40.093478 4664 generic.go:334] "Generic (PLEG): container finished" podID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerID="07894916758d5f2b53762d44c32f33eeed80ab0bd07b8509730d29c95dc7eca3" exitCode=0 Oct 13 07:10:40 crc kubenswrapper[4664]: I1013 07:10:40.093759 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt8xd" event={"ID":"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8","Type":"ContainerDied","Data":"07894916758d5f2b53762d44c32f33eeed80ab0bd07b8509730d29c95dc7eca3"} Oct 13 07:10:40 crc kubenswrapper[4664]: I1013 07:10:40.098482 4664 generic.go:334] "Generic (PLEG): container finished" podID="6f8225c3-9253-47c0-969d-c7f046296e90" containerID="01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929" exitCode=0 Oct 13 07:10:40 crc kubenswrapper[4664]: I1013 07:10:40.098532 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerDied","Data":"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929"} Oct 13 07:10:41 crc kubenswrapper[4664]: I1013 07:10:41.109903 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerStarted","Data":"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28"} Oct 13 07:10:41 crc kubenswrapper[4664]: I1013 07:10:41.113001 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt8xd" event={"ID":"ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8","Type":"ContainerStarted","Data":"67c01cfbe480cd590ea6f89a5e6aa1f41c0fca04cf5d1a6ffafa48334d3366f4"} Oct 13 07:10:41 crc kubenswrapper[4664]: I1013 07:10:41.153705 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-pt8xd" podStartSLOduration=3.440921551 podStartE2EDuration="16.15368604s" podCreationTimestamp="2025-10-13 07:10:25 +0000 UTC" firstStartedPulling="2025-10-13 07:10:27.968776512 +0000 UTC m=+1435.656221704" lastFinishedPulling="2025-10-13 07:10:40.681540971 +0000 UTC m=+1448.368986193" observedRunningTime="2025-10-13 07:10:41.142826671 +0000 UTC m=+1448.830271893" watchObservedRunningTime="2025-10-13 07:10:41.15368604 +0000 UTC m=+1448.841131242" Oct 13 07:10:42 crc kubenswrapper[4664]: I1013 07:10:42.125114 4664 generic.go:334] "Generic (PLEG): container finished" podID="6f8225c3-9253-47c0-969d-c7f046296e90" containerID="1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28" exitCode=0 Oct 13 07:10:42 crc kubenswrapper[4664]: I1013 07:10:42.125163 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerDied","Data":"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28"} Oct 13 07:10:43 crc kubenswrapper[4664]: I1013 07:10:43.136264 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerStarted","Data":"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681"} Oct 13 07:10:43 crc kubenswrapper[4664]: I1013 07:10:43.158413 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bx269" podStartSLOduration=3.563007787 podStartE2EDuration="6.1583958s" podCreationTimestamp="2025-10-13 07:10:37 +0000 UTC" firstStartedPulling="2025-10-13 07:10:40.105626203 +0000 UTC m=+1447.793071405" lastFinishedPulling="2025-10-13 07:10:42.701014226 +0000 UTC m=+1450.388459418" observedRunningTime="2025-10-13 07:10:43.15727965 +0000 UTC m=+1450.844724872" watchObservedRunningTime="2025-10-13 07:10:43.1583958 +0000 UTC m=+1450.845841002" Oct 13 07:10:46 crc kubenswrapper[4664]: I1013 07:10:46.405898 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:46 crc kubenswrapper[4664]: I1013 07:10:46.406382 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:10:47 crc kubenswrapper[4664]: I1013 07:10:47.449163 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:10:47 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:10:47 crc kubenswrapper[4664]: > Oct 13 07:10:47 crc kubenswrapper[4664]: I1013 07:10:47.824715 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:47 crc kubenswrapper[4664]: I1013 07:10:47.826098 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:47 crc kubenswrapper[4664]: I1013 07:10:47.873644 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:48 crc kubenswrapper[4664]: I1013 07:10:48.246062 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:48 crc kubenswrapper[4664]: I1013 07:10:48.299588 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.212624 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bx269" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="registry-server" containerID="cri-o://03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681" gracePeriod=2 Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.687031 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.868378 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content\") pod \"6f8225c3-9253-47c0-969d-c7f046296e90\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.868423 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities\") pod \"6f8225c3-9253-47c0-969d-c7f046296e90\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.868452 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wqrv\" (UniqueName: \"kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv\") pod \"6f8225c3-9253-47c0-969d-c7f046296e90\" (UID: \"6f8225c3-9253-47c0-969d-c7f046296e90\") " Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.869710 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities" (OuterVolumeSpecName: "utilities") pod "6f8225c3-9253-47c0-969d-c7f046296e90" (UID: "6f8225c3-9253-47c0-969d-c7f046296e90"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.869974 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.874469 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv" (OuterVolumeSpecName: "kube-api-access-7wqrv") pod "6f8225c3-9253-47c0-969d-c7f046296e90" (UID: "6f8225c3-9253-47c0-969d-c7f046296e90"). InnerVolumeSpecName "kube-api-access-7wqrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.883645 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f8225c3-9253-47c0-969d-c7f046296e90" (UID: "6f8225c3-9253-47c0-969d-c7f046296e90"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.972580 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8225c3-9253-47c0-969d-c7f046296e90-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:10:50 crc kubenswrapper[4664]: I1013 07:10:50.972632 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wqrv\" (UniqueName: \"kubernetes.io/projected/6f8225c3-9253-47c0-969d-c7f046296e90-kube-api-access-7wqrv\") on node \"crc\" DevicePath \"\"" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.233285 4664 generic.go:334] "Generic (PLEG): container finished" podID="6f8225c3-9253-47c0-969d-c7f046296e90" containerID="03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681" exitCode=0 Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.233345 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerDied","Data":"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681"} Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.233395 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bx269" event={"ID":"6f8225c3-9253-47c0-969d-c7f046296e90","Type":"ContainerDied","Data":"80bd0410738624105bd2a1c71f726de683747b2b43ce32f6cc4322783b24e0db"} Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.233425 4664 scope.go:117] "RemoveContainer" containerID="03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.237265 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bx269" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.271786 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.276167 4664 scope.go:117] "RemoveContainer" containerID="1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.285877 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bx269"] Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.301171 4664 scope.go:117] "RemoveContainer" containerID="01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.345759 4664 scope.go:117] "RemoveContainer" containerID="03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681" Oct 13 07:10:51 crc kubenswrapper[4664]: E1013 07:10:51.346328 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681\": container with ID starting with 03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681 not found: ID does not exist" containerID="03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.346377 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681"} err="failed to get container status \"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681\": rpc error: code = NotFound desc = could not find container \"03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681\": container with ID starting with 03df3828300e67e378893fae30cec96ed00724a679d140c14f4f34fcf7914681 not found: ID does not exist" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.346410 4664 scope.go:117] "RemoveContainer" containerID="1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28" Oct 13 07:10:51 crc kubenswrapper[4664]: E1013 07:10:51.346924 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28\": container with ID starting with 1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28 not found: ID does not exist" containerID="1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.346968 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28"} err="failed to get container status \"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28\": rpc error: code = NotFound desc = could not find container \"1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28\": container with ID starting with 1c9fd917704562302dfd39b82db968e1677f2802e936438f3c64478d23a52e28 not found: ID does not exist" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.346993 4664 scope.go:117] "RemoveContainer" containerID="01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929" Oct 13 07:10:51 crc kubenswrapper[4664]: E1013 07:10:51.347365 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929\": container with ID starting with 01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929 not found: ID does not exist" containerID="01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929" Oct 13 07:10:51 crc kubenswrapper[4664]: I1013 07:10:51.347522 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929"} err="failed to get container status \"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929\": rpc error: code = NotFound desc = could not find container \"01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929\": container with ID starting with 01d4945b8cd6f2cbdbefa3fd77767778a331dec57b02e54076105915cbda4929 not found: ID does not exist" Oct 13 07:10:53 crc kubenswrapper[4664]: I1013 07:10:53.058240 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" path="/var/lib/kubelet/pods/6f8225c3-9253-47c0-969d-c7f046296e90/volumes" Oct 13 07:10:57 crc kubenswrapper[4664]: I1013 07:10:57.452227 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:10:57 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:10:57 crc kubenswrapper[4664]: > Oct 13 07:10:58 crc kubenswrapper[4664]: I1013 07:10:58.811451 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:10:58 crc kubenswrapper[4664]: I1013 07:10:58.811499 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:10:58 crc kubenswrapper[4664]: I1013 07:10:58.811539 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:10:58 crc kubenswrapper[4664]: I1013 07:10:58.812279 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:10:58 crc kubenswrapper[4664]: I1013 07:10:58.812331 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad" gracePeriod=600 Oct 13 07:10:59 crc kubenswrapper[4664]: I1013 07:10:59.331240 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" 
containerID="d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad" exitCode=0 Oct 13 07:10:59 crc kubenswrapper[4664]: I1013 07:10:59.331334 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad"} Oct 13 07:10:59 crc kubenswrapper[4664]: I1013 07:10:59.331602 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"} Oct 13 07:10:59 crc kubenswrapper[4664]: I1013 07:10:59.331623 4664 scope.go:117] "RemoveContainer" containerID="961db826ff7e07c02c9982962c5607bbdb4ed756076ad7120e5104e7d1c7cca4" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.646899 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:06 crc kubenswrapper[4664]: E1013 07:11:06.647902 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="extract-utilities" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.647919 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="extract-utilities" Oct 13 07:11:06 crc kubenswrapper[4664]: E1013 07:11:06.647957 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="extract-content" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.647966 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="extract-content" Oct 13 07:11:06 crc kubenswrapper[4664]: E1013 07:11:06.647979 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="registry-server" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.647988 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="registry-server" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.648242 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f8225c3-9253-47c0-969d-c7f046296e90" containerName="registry-server" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.650025 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.676016 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.780170 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.780449 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s94wm\" (UniqueName: \"kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.780657 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.882774 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.883091 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s94wm\" (UniqueName: \"kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.883230 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.883271 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.883534 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.903615 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s94wm\" (UniqueName: \"kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm\") pod \"community-operators-nhkzk\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:06 crc kubenswrapper[4664]: I1013 07:11:06.974549 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:07 crc kubenswrapper[4664]: I1013 07:11:07.476353 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:11:07 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:11:07 crc kubenswrapper[4664]: > Oct 13 07:11:07 crc kubenswrapper[4664]: I1013 07:11:07.521385 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:08 crc kubenswrapper[4664]: I1013 07:11:08.446707 4664 generic.go:334] "Generic (PLEG): container finished" podID="7107da4d-deb0-4079-9236-93b0b837b171" containerID="ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9" exitCode=0 Oct 13 07:11:08 crc kubenswrapper[4664]: I1013 07:11:08.446777 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerDied","Data":"ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9"} Oct 13 07:11:08 crc kubenswrapper[4664]: I1013 07:11:08.447834 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerStarted","Data":"72ca048ad950aa06cc959ff10e19398b6f72bd04dac3ed087524c5efe5829258"} Oct 13 07:11:09 crc kubenswrapper[4664]: I1013 07:11:09.459508 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerStarted","Data":"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6"} Oct 13 07:11:13 crc kubenswrapper[4664]: I1013 07:11:13.512248 4664 generic.go:334] "Generic (PLEG): container finished" podID="7107da4d-deb0-4079-9236-93b0b837b171" containerID="158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6" exitCode=0 Oct 13 07:11:13 crc kubenswrapper[4664]: I1013 07:11:13.512597 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerDied","Data":"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6"} Oct 13 07:11:14 crc kubenswrapper[4664]: I1013 07:11:14.545048 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerStarted","Data":"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38"} Oct 13 07:11:14 crc kubenswrapper[4664]: I1013 07:11:14.579544 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nhkzk" podStartSLOduration=3.085057545 podStartE2EDuration="8.57952201s" podCreationTimestamp="2025-10-13 07:11:06 +0000 UTC" firstStartedPulling="2025-10-13 
07:11:08.448943125 +0000 UTC m=+1476.136388307" lastFinishedPulling="2025-10-13 07:11:13.94340757 +0000 UTC m=+1481.630852772" observedRunningTime="2025-10-13 07:11:14.567724043 +0000 UTC m=+1482.255169235" watchObservedRunningTime="2025-10-13 07:11:14.57952201 +0000 UTC m=+1482.266967222" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.166893 4664 scope.go:117] "RemoveContainer" containerID="eed63fb43c233bd43921992b315b134bb8656b2500af465e4d43e6791aeb4586" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.204173 4664 scope.go:117] "RemoveContainer" containerID="764ed53a91604c3eb760ad3034214b347899c557047289adf49dfd661cd1e6f2" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.235340 4664 scope.go:117] "RemoveContainer" containerID="855fa1e1e273a560a58855113698758f8ae69c8f0fd44854faa09d3258c67c51" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.454786 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.511618 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pt8xd" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.615157 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pt8xd"] Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.700742 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.701256 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9hkbn" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="registry-server" containerID="cri-o://92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3" gracePeriod=2 Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.979455 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:16 crc kubenswrapper[4664]: I1013 07:11:16.982170 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.238870 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.418184 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities\") pod \"06d97f83-9d40-4f8e-bf46-46275670fa82\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.418480 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content\") pod \"06d97f83-9d40-4f8e-bf46-46275670fa82\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.418556 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25pjj\" (UniqueName: \"kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj\") pod \"06d97f83-9d40-4f8e-bf46-46275670fa82\" (UID: \"06d97f83-9d40-4f8e-bf46-46275670fa82\") " Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.418583 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities" (OuterVolumeSpecName: "utilities") pod "06d97f83-9d40-4f8e-bf46-46275670fa82" (UID: "06d97f83-9d40-4f8e-bf46-46275670fa82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.419319 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.433992 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj" (OuterVolumeSpecName: "kube-api-access-25pjj") pod "06d97f83-9d40-4f8e-bf46-46275670fa82" (UID: "06d97f83-9d40-4f8e-bf46-46275670fa82"). InnerVolumeSpecName "kube-api-access-25pjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.505445 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06d97f83-9d40-4f8e-bf46-46275670fa82" (UID: "06d97f83-9d40-4f8e-bf46-46275670fa82"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.527323 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06d97f83-9d40-4f8e-bf46-46275670fa82-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.527565 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25pjj\" (UniqueName: \"kubernetes.io/projected/06d97f83-9d40-4f8e-bf46-46275670fa82-kube-api-access-25pjj\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.573656 4664 generic.go:334] "Generic (PLEG): container finished" podID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerID="92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3" exitCode=0 Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.573997 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerDied","Data":"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3"} Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.574033 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hkbn" event={"ID":"06d97f83-9d40-4f8e-bf46-46275670fa82","Type":"ContainerDied","Data":"92e85322d0b772b29d1b7f5e349b4702b6cd8bf795b6cc0607417a84f730614f"} Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.574050 4664 scope.go:117] "RemoveContainer" containerID="92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.574363 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hkbn" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.606018 4664 scope.go:117] "RemoveContainer" containerID="68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.623156 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.631518 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9hkbn"] Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.639921 4664 scope.go:117] "RemoveContainer" containerID="ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.699437 4664 scope.go:117] "RemoveContainer" containerID="92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3" Oct 13 07:11:17 crc kubenswrapper[4664]: E1013 07:11:17.700013 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3\": container with ID starting with 92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3 not found: ID does not exist" containerID="92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.700079 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3"} err="failed to get container status \"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3\": rpc error: code = NotFound desc = could not find container \"92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3\": container with ID starting with 92c24f7f01a8273367550c6beb38be9c34b8ffa403a6e2c9223aa212eb8b0ed3 not found: ID does not exist" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.700106 4664 scope.go:117] "RemoveContainer" containerID="68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0" Oct 13 07:11:17 crc kubenswrapper[4664]: E1013 07:11:17.700465 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0\": container with ID starting with 68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0 not found: ID does not exist" containerID="68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.700606 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0"} err="failed to get container status \"68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0\": rpc error: code = NotFound desc = could not find container \"68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0\": container with ID starting with 68422d6e9789e445dd6b8e0833637e7a258d42b8c6c89c6dab893c9a123e88b0 not found: ID does not exist" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.700693 4664 scope.go:117] "RemoveContainer" containerID="ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256" Oct 13 07:11:17 crc kubenswrapper[4664]: E1013 07:11:17.701019 4664 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256\": container with ID starting with ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256 not found: ID does not exist" containerID="ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256" Oct 13 07:11:17 crc kubenswrapper[4664]: I1013 07:11:17.701051 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256"} err="failed to get container status \"ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256\": rpc error: code = NotFound desc = could not find container \"ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256\": container with ID starting with ff832233cdf2e7e6e0ab640cc9a7c7c1d5b1578101f779fcfb53364b017a4256 not found: ID does not exist" Oct 13 07:11:18 crc kubenswrapper[4664]: I1013 07:11:18.049750 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nhkzk" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="registry-server" probeResult="failure" output=< Oct 13 07:11:18 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:11:18 crc kubenswrapper[4664]: > Oct 13 07:11:19 crc kubenswrapper[4664]: I1013 07:11:19.056444 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" path="/var/lib/kubelet/pods/06d97f83-9d40-4f8e-bf46-46275670fa82/volumes" Oct 13 07:11:27 crc kubenswrapper[4664]: I1013 07:11:27.083936 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:27 crc kubenswrapper[4664]: I1013 07:11:27.146721 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:27 crc kubenswrapper[4664]: I1013 07:11:27.344216 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:28 crc kubenswrapper[4664]: I1013 07:11:28.696665 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nhkzk" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="registry-server" containerID="cri-o://c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38" gracePeriod=2 Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.179597 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.340190 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities\") pod \"7107da4d-deb0-4079-9236-93b0b837b171\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.340350 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s94wm\" (UniqueName: \"kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm\") pod \"7107da4d-deb0-4079-9236-93b0b837b171\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.340494 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content\") pod \"7107da4d-deb0-4079-9236-93b0b837b171\" (UID: \"7107da4d-deb0-4079-9236-93b0b837b171\") " Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.340843 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities" (OuterVolumeSpecName: "utilities") pod "7107da4d-deb0-4079-9236-93b0b837b171" (UID: "7107da4d-deb0-4079-9236-93b0b837b171"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.341392 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.352990 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm" (OuterVolumeSpecName: "kube-api-access-s94wm") pod "7107da4d-deb0-4079-9236-93b0b837b171" (UID: "7107da4d-deb0-4079-9236-93b0b837b171"). InnerVolumeSpecName "kube-api-access-s94wm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.386207 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7107da4d-deb0-4079-9236-93b0b837b171" (UID: "7107da4d-deb0-4079-9236-93b0b837b171"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.443061 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7107da4d-deb0-4079-9236-93b0b837b171-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.443271 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s94wm\" (UniqueName: \"kubernetes.io/projected/7107da4d-deb0-4079-9236-93b0b837b171-kube-api-access-s94wm\") on node \"crc\" DevicePath \"\"" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.708733 4664 generic.go:334] "Generic (PLEG): container finished" podID="7107da4d-deb0-4079-9236-93b0b837b171" containerID="c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38" exitCode=0 Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.708771 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerDied","Data":"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38"} Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.709524 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nhkzk" event={"ID":"7107da4d-deb0-4079-9236-93b0b837b171","Type":"ContainerDied","Data":"72ca048ad950aa06cc959ff10e19398b6f72bd04dac3ed087524c5efe5829258"} Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.708815 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nhkzk" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.709594 4664 scope.go:117] "RemoveContainer" containerID="c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.738919 4664 scope.go:117] "RemoveContainer" containerID="158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.746681 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.764917 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nhkzk"] Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.764982 4664 scope.go:117] "RemoveContainer" containerID="ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.811996 4664 scope.go:117] "RemoveContainer" containerID="c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38" Oct 13 07:11:29 crc kubenswrapper[4664]: E1013 07:11:29.812603 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38\": container with ID starting with c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38 not found: ID does not exist" containerID="c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.812643 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38"} err="failed to get container status 
\"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38\": rpc error: code = NotFound desc = could not find container \"c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38\": container with ID starting with c0327bd5921cc3d7ca65f27c977a99b00887e573d2cb1d9e002bb769475a0a38 not found: ID does not exist" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.812669 4664 scope.go:117] "RemoveContainer" containerID="158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6" Oct 13 07:11:29 crc kubenswrapper[4664]: E1013 07:11:29.813070 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6\": container with ID starting with 158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6 not found: ID does not exist" containerID="158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.813106 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6"} err="failed to get container status \"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6\": rpc error: code = NotFound desc = could not find container \"158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6\": container with ID starting with 158e6682c589253096ff6b5df7e175288b3568a80ab7a737684cfbd5afd947f6 not found: ID does not exist" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.813144 4664 scope.go:117] "RemoveContainer" containerID="ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9" Oct 13 07:11:29 crc kubenswrapper[4664]: E1013 07:11:29.813599 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9\": container with ID starting with ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9 not found: ID does not exist" containerID="ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9" Oct 13 07:11:29 crc kubenswrapper[4664]: I1013 07:11:29.813630 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9"} err="failed to get container status \"ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9\": rpc error: code = NotFound desc = could not find container \"ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9\": container with ID starting with ce857465fea7db727bdc19e9cb9687a4071147a92f4c8d4a19942a330203ebe9 not found: ID does not exist" Oct 13 07:11:31 crc kubenswrapper[4664]: I1013 07:11:31.061895 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7107da4d-deb0-4079-9236-93b0b837b171" path="/var/lib/kubelet/pods/7107da4d-deb0-4079-9236-93b0b837b171/volumes" Oct 13 07:12:52 crc kubenswrapper[4664]: I1013 07:12:52.053630 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-472ms"] Oct 13 07:12:52 crc kubenswrapper[4664]: I1013 07:12:52.062640 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-472ms"] Oct 13 07:12:53 crc kubenswrapper[4664]: I1013 07:12:53.059746 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8864712-58ae-4769-a998-f78da5eaf5ab" 
path="/var/lib/kubelet/pods/e8864712-58ae-4769-a998-f78da5eaf5ab/volumes" Oct 13 07:12:53 crc kubenswrapper[4664]: I1013 07:12:53.060705 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-4cxhf"] Oct 13 07:12:53 crc kubenswrapper[4664]: I1013 07:12:53.063359 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-4cxhf"] Oct 13 07:12:54 crc kubenswrapper[4664]: I1013 07:12:54.026820 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-zgqsr"] Oct 13 07:12:54 crc kubenswrapper[4664]: I1013 07:12:54.037322 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-zgqsr"] Oct 13 07:12:55 crc kubenswrapper[4664]: I1013 07:12:55.064655 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dbf0b57-1653-44ef-a493-0d5aebc30318" path="/var/lib/kubelet/pods/6dbf0b57-1653-44ef-a493-0d5aebc30318/volumes" Oct 13 07:12:55 crc kubenswrapper[4664]: I1013 07:12:55.066060 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598" path="/var/lib/kubelet/pods/b0eb1ddf-3b6c-4615-b8f8-ef3134b6d598/volumes" Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.029230 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-k459x"] Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.041271 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-h49b2"] Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.059009 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-g6pqn"] Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.063627 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-k459x"] Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.073388 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-g6pqn"] Oct 13 07:13:01 crc kubenswrapper[4664]: I1013 07:13:01.081315 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-h49b2"] Oct 13 07:13:02 crc kubenswrapper[4664]: I1013 07:13:02.039345 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-bdt9h"] Oct 13 07:13:02 crc kubenswrapper[4664]: I1013 07:13:02.048568 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-bdt9h"] Oct 13 07:13:03 crc kubenswrapper[4664]: I1013 07:13:03.059638 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68d06993-8fe8-4f18-842b-a989f5f9c95c" path="/var/lib/kubelet/pods/68d06993-8fe8-4f18-842b-a989f5f9c95c/volumes" Oct 13 07:13:03 crc kubenswrapper[4664]: I1013 07:13:03.061173 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e45296-e54a-4563-9c44-6cdc4e2e5640" path="/var/lib/kubelet/pods/b9e45296-e54a-4563-9c44-6cdc4e2e5640/volumes" Oct 13 07:13:03 crc kubenswrapper[4664]: I1013 07:13:03.063228 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daa756df-de77-404f-b164-d668cd3a544b" path="/var/lib/kubelet/pods/daa756df-de77-404f-b164-d668cd3a544b/volumes" Oct 13 07:13:03 crc kubenswrapper[4664]: I1013 07:13:03.064502 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcbed96d-d57c-44ce-98ad-9a17f7579163" path="/var/lib/kubelet/pods/fcbed96d-d57c-44ce-98ad-9a17f7579163/volumes" Oct 13 07:13:04 crc kubenswrapper[4664]: I1013 07:13:04.030028 4664 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-e6d7-account-create-bxc77"] Oct 13 07:13:04 crc kubenswrapper[4664]: I1013 07:13:04.049179 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-e6d7-account-create-bxc77"] Oct 13 07:13:05 crc kubenswrapper[4664]: I1013 07:13:05.022556 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0a22-account-create-59l9x"] Oct 13 07:13:05 crc kubenswrapper[4664]: I1013 07:13:05.031717 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-0a22-account-create-59l9x"] Oct 13 07:13:05 crc kubenswrapper[4664]: I1013 07:13:05.060582 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34827b2f-cc99-4948-868a-be9095deadb1" path="/var/lib/kubelet/pods/34827b2f-cc99-4948-868a-be9095deadb1/volumes" Oct 13 07:13:05 crc kubenswrapper[4664]: I1013 07:13:05.078961 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acd74efa-e542-450a-b7c9-91eee744d0e9" path="/var/lib/kubelet/pods/acd74efa-e542-450a-b7c9-91eee744d0e9/volumes" Oct 13 07:13:07 crc kubenswrapper[4664]: I1013 07:13:07.683194 4664 generic.go:334] "Generic (PLEG): container finished" podID="5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" containerID="060b6d199edb52b9ce68dc321d9d80b8af42174b07162e5a4c7e2f4f1db75658" exitCode=0 Oct 13 07:13:07 crc kubenswrapper[4664]: I1013 07:13:07.683514 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" event={"ID":"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1","Type":"ContainerDied","Data":"060b6d199edb52b9ce68dc321d9d80b8af42174b07162e5a4c7e2f4f1db75658"} Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.037904 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-3913-account-create-zcqth"] Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.057363 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-3913-account-create-zcqth"] Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.181831 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.217138 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle\") pod \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.217213 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6gt6\" (UniqueName: \"kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6\") pod \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.217250 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory\") pod \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.217307 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key\") pod \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\" (UID: \"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1\") " Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.229762 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6" (OuterVolumeSpecName: "kube-api-access-g6gt6") pod "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" (UID: "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1"). InnerVolumeSpecName "kube-api-access-g6gt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.229899 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" (UID: "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.246276 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory" (OuterVolumeSpecName: "inventory") pod "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" (UID: "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.256458 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" (UID: "5f1b4f59-a38d-41bc-8f1b-92479be8b4b1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.319064 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.319106 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.319122 4664 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.319134 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6gt6\" (UniqueName: \"kubernetes.io/projected/5f1b4f59-a38d-41bc-8f1b-92479be8b4b1-kube-api-access-g6gt6\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.702064 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" event={"ID":"5f1b4f59-a38d-41bc-8f1b-92479be8b4b1","Type":"ContainerDied","Data":"21f349b0dad2c4317593c60b6515ded11eaf8b7c6907fdc06cb25ae80eaf1bd8"} Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.702107 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21f349b0dad2c4317593c60b6515ded11eaf8b7c6907fdc06cb25ae80eaf1bd8" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.702122 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.825491 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2"] Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.826163 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.826753 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.826917 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="extract-utilities" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.827029 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="extract-utilities" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.827136 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="extract-utilities" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.827221 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="extract-utilities" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.827355 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="extract-content" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.827431 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="extract-content" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.827555 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.827655 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.827725 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="extract-content" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.827854 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="extract-content" Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.828090 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.828155 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.828698 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="06d97f83-9d40-4f8e-bf46-46275670fa82" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.828794 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f1b4f59-a38d-41bc-8f1b-92479be8b4b1" 
containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.828892 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="7107da4d-deb0-4079-9236-93b0b837b171" containerName="registry-server" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.829932 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.833241 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.833588 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.834018 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.834122 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:13:09 crc kubenswrapper[4664]: I1013 07:13:09.835721 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2"] Oct 13 07:13:09 crc kubenswrapper[4664]: E1013 07:13:09.966304 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f1b4f59_a38d_41bc_8f1b_92479be8b4b1.slice/crio-21f349b0dad2c4317593c60b6515ded11eaf8b7c6907fdc06cb25ae80eaf1bd8\": RecentStats: unable to find data in memory cache]" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.030325 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.030416 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tqq5\" (UniqueName: \"kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.030476 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.131738 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tqq5\" (UniqueName: \"kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: 
\"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.131864 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.133985 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.137979 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.138358 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.152710 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tqq5\" (UniqueName: \"kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-t56l2\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:10 crc kubenswrapper[4664]: I1013 07:13:10.448534 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.013467 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2"] Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.016472 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.063226 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="954b0067-d3cf-4f1d-a744-e748779c4422" path="/var/lib/kubelet/pods/954b0067-d3cf-4f1d-a744-e748779c4422/volumes" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.720403 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" event={"ID":"fd7254f1-5310-490d-bf9d-ad81da0a7fb1","Type":"ContainerStarted","Data":"cd134be119aa8ab2908327c400151ad74069d6a03c29396555c5959d31dad0c9"} Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.850502 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.854220 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.896982 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.897038 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb52g\" (UniqueName: \"kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.897103 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.927208 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.998569 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.998746 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities\") pod \"certified-operators-wl2pj\" (UID: 
\"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.998769 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb52g\" (UniqueName: \"kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.999049 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:11 crc kubenswrapper[4664]: I1013 07:13:11.999468 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:12 crc kubenswrapper[4664]: I1013 07:13:12.029969 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb52g\" (UniqueName: \"kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g\") pod \"certified-operators-wl2pj\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:12 crc kubenswrapper[4664]: I1013 07:13:12.226881 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:12 crc kubenswrapper[4664]: I1013 07:13:12.735419 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" event={"ID":"fd7254f1-5310-490d-bf9d-ad81da0a7fb1","Type":"ContainerStarted","Data":"9d15a394b1540d6737ab350f2c695c4c21690233facfc7358f3b7ff1d5b91874"} Oct 13 07:13:12 crc kubenswrapper[4664]: I1013 07:13:12.752250 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:12 crc kubenswrapper[4664]: W1013 07:13:12.760400 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod198a76ea_c425_49d6_a4ea_3fda9e08c776.slice/crio-abeafbba4469e245e6b6c88abce08c60c189f745e1156a36f97d8487062f3c50 WatchSource:0}: Error finding container abeafbba4469e245e6b6c88abce08c60c189f745e1156a36f97d8487062f3c50: Status 404 returned error can't find the container with id abeafbba4469e245e6b6c88abce08c60c189f745e1156a36f97d8487062f3c50 Oct 13 07:13:12 crc kubenswrapper[4664]: I1013 07:13:12.786165 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" podStartSLOduration=3.291022173 podStartE2EDuration="3.786145624s" podCreationTimestamp="2025-10-13 07:13:09 +0000 UTC" firstStartedPulling="2025-10-13 07:13:11.016082223 +0000 UTC m=+1598.703527455" lastFinishedPulling="2025-10-13 07:13:11.511205684 +0000 UTC m=+1599.198650906" observedRunningTime="2025-10-13 07:13:12.775359176 +0000 UTC m=+1600.462804388" watchObservedRunningTime="2025-10-13 07:13:12.786145624 +0000 UTC m=+1600.473590816" Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.035423 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-fcae-account-create-2zt28"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.045577 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-fcae-account-create-2zt28"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.068002 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d02b8efa-0c69-4122-a4d1-811d06cf6ac1" path="/var/lib/kubelet/pods/d02b8efa-0c69-4122-a4d1-811d06cf6ac1/volumes" Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.069670 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-0861-account-create-vbdml"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.069713 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1f27-account-create-rspbf"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.075975 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-3aef-account-create-psnmm"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.082463 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1f27-account-create-rspbf"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.088779 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-3aef-account-create-psnmm"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.095320 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-0861-account-create-vbdml"] Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.749765 4664 generic.go:334] "Generic (PLEG): container finished" podID="198a76ea-c425-49d6-a4ea-3fda9e08c776" 
containerID="538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286" exitCode=0 Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.749874 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerDied","Data":"538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286"} Oct 13 07:13:13 crc kubenswrapper[4664]: I1013 07:13:13.750591 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerStarted","Data":"abeafbba4469e245e6b6c88abce08c60c189f745e1156a36f97d8487062f3c50"} Oct 13 07:13:14 crc kubenswrapper[4664]: I1013 07:13:14.762354 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerStarted","Data":"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660"} Oct 13 07:13:15 crc kubenswrapper[4664]: I1013 07:13:15.062541 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6764cf07-0dd7-4ce1-8ab9-684feaae4aec" path="/var/lib/kubelet/pods/6764cf07-0dd7-4ce1-8ab9-684feaae4aec/volumes" Oct 13 07:13:15 crc kubenswrapper[4664]: I1013 07:13:15.064500 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ac9ed1e-0ef3-48b5-be63-a75276e406e7" path="/var/lib/kubelet/pods/6ac9ed1e-0ef3-48b5-be63-a75276e406e7/volumes" Oct 13 07:13:15 crc kubenswrapper[4664]: I1013 07:13:15.065726 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca5c6d84-14c0-410b-9e05-610f875bdb0a" path="/var/lib/kubelet/pods/ca5c6d84-14c0-410b-9e05-610f875bdb0a/volumes" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.399195 4664 scope.go:117] "RemoveContainer" containerID="a98a1492280263b06e8dffc642d112c6dce58cc3cc13d8c1e6326bf5f2833866" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.447886 4664 scope.go:117] "RemoveContainer" containerID="67ad3ce4c7289a28e31c87521bd6d392222dd212b8a74fc9decd15bebfd95537" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.512203 4664 scope.go:117] "RemoveContainer" containerID="894f366308b7a378f9a4bc5b9cbe6b14b8441c1f0bcaee557bc991889a35a746" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.565382 4664 scope.go:117] "RemoveContainer" containerID="26160e0d45a37ab6eec436ccdf57a11521fbbce6adc6ef2da3a81ab61239ae53" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.630494 4664 scope.go:117] "RemoveContainer" containerID="2f658df34978904e13580e51db56e05c697354be4c6236207c7142d839d2514d" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.666877 4664 scope.go:117] "RemoveContainer" containerID="1e1df26a5e3f60fa964d972da6732ffebf705f2141f9aaa6cb9954b83d6a2069" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.711550 4664 scope.go:117] "RemoveContainer" containerID="beac75c4b7a97f1661d7db4a47290235c738f276f46e5a8c5e801c85434e07c5" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.732109 4664 scope.go:117] "RemoveContainer" containerID="10ba63a4f580a93d2b34b9b5f4af63b5694b661dc66be9637d8e2e7317b5d7d2" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.752003 4664 scope.go:117] "RemoveContainer" containerID="4607bc3d98a8b4a62a54bc5cbc1cfafd39b8638631c628907e695f332877e1b7" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.779645 4664 scope.go:117] "RemoveContainer" 
containerID="05cad278c964d0f77e8f47b9d9368c7cbeb3cd0dc88a3772db0bfaff99a1cf3f" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.794601 4664 generic.go:334] "Generic (PLEG): container finished" podID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerID="0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660" exitCode=0 Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.794687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerDied","Data":"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660"} Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.820245 4664 scope.go:117] "RemoveContainer" containerID="e773dbc49f3028c8c43252c120362347f33d0de3c68ae82615d85272ca0bf6e6" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.849885 4664 scope.go:117] "RemoveContainer" containerID="86fca4589d305b331ec63e8d7d414f93c8a8bcfff146c29aa2cf5a69eb3be901" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.883053 4664 scope.go:117] "RemoveContainer" containerID="c726bd2426a25507bf2fd8c1a897af212fa2aca339c75eaff52015869ed1d0f5" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.915777 4664 scope.go:117] "RemoveContainer" containerID="a1fd8125e060c9248e63ddd73ffaf0bbd6e9a4e9a86d7c71cdf57d890a9face2" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.939646 4664 scope.go:117] "RemoveContainer" containerID="f3f1f0a7aa4f371c4cd8dd2628174b3188091716d8623ffcc23692a3680015a6" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.959254 4664 scope.go:117] "RemoveContainer" containerID="5fb95c8cf34ea5ed92c038af39a74d876313962d6c8055def018aed4773fddf9" Oct 13 07:13:16 crc kubenswrapper[4664]: I1013 07:13:16.978408 4664 scope.go:117] "RemoveContainer" containerID="ee65af1affc1e0560d8e0cd747ad7cc83ecc46f1ab35e839d9ace78c72499225" Oct 13 07:13:17 crc kubenswrapper[4664]: I1013 07:13:17.038283 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-bpmkp"] Oct 13 07:13:17 crc kubenswrapper[4664]: I1013 07:13:17.063926 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-bpmkp"] Oct 13 07:13:17 crc kubenswrapper[4664]: I1013 07:13:17.837562 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerStarted","Data":"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b"} Oct 13 07:13:17 crc kubenswrapper[4664]: I1013 07:13:17.868470 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wl2pj" podStartSLOduration=3.30691624 podStartE2EDuration="6.868454583s" podCreationTimestamp="2025-10-13 07:13:11 +0000 UTC" firstStartedPulling="2025-10-13 07:13:13.752767506 +0000 UTC m=+1601.440212708" lastFinishedPulling="2025-10-13 07:13:17.314305869 +0000 UTC m=+1605.001751051" observedRunningTime="2025-10-13 07:13:17.864379375 +0000 UTC m=+1605.551824587" watchObservedRunningTime="2025-10-13 07:13:17.868454583 +0000 UTC m=+1605.555899775" Oct 13 07:13:19 crc kubenswrapper[4664]: I1013 07:13:19.060725 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21047ce5-4bdf-4660-a953-ab87a8e5e5e1" path="/var/lib/kubelet/pods/21047ce5-4bdf-4660-a953-ab87a8e5e5e1/volumes" Oct 13 07:13:22 crc kubenswrapper[4664]: I1013 07:13:22.227978 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:22 crc kubenswrapper[4664]: I1013 07:13:22.229087 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:22 crc kubenswrapper[4664]: I1013 07:13:22.301002 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:22 crc kubenswrapper[4664]: I1013 07:13:22.961076 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:23 crc kubenswrapper[4664]: I1013 07:13:23.056589 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:24 crc kubenswrapper[4664]: I1013 07:13:24.912334 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wl2pj" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="registry-server" containerID="cri-o://782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b" gracePeriod=2 Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.473463 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.563093 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb52g\" (UniqueName: \"kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g\") pod \"198a76ea-c425-49d6-a4ea-3fda9e08c776\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.563501 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities\") pod \"198a76ea-c425-49d6-a4ea-3fda9e08c776\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.563580 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content\") pod \"198a76ea-c425-49d6-a4ea-3fda9e08c776\" (UID: \"198a76ea-c425-49d6-a4ea-3fda9e08c776\") " Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.564885 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities" (OuterVolumeSpecName: "utilities") pod "198a76ea-c425-49d6-a4ea-3fda9e08c776" (UID: "198a76ea-c425-49d6-a4ea-3fda9e08c776"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.568889 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g" (OuterVolumeSpecName: "kube-api-access-hb52g") pod "198a76ea-c425-49d6-a4ea-3fda9e08c776" (UID: "198a76ea-c425-49d6-a4ea-3fda9e08c776"). InnerVolumeSpecName "kube-api-access-hb52g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.612129 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "198a76ea-c425-49d6-a4ea-3fda9e08c776" (UID: "198a76ea-c425-49d6-a4ea-3fda9e08c776"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.666067 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb52g\" (UniqueName: \"kubernetes.io/projected/198a76ea-c425-49d6-a4ea-3fda9e08c776-kube-api-access-hb52g\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.666316 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.666381 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/198a76ea-c425-49d6-a4ea-3fda9e08c776-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.924555 4664 generic.go:334] "Generic (PLEG): container finished" podID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerID="782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b" exitCode=0 Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.924640 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerDied","Data":"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b"} Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.924726 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2pj" event={"ID":"198a76ea-c425-49d6-a4ea-3fda9e08c776","Type":"ContainerDied","Data":"abeafbba4469e245e6b6c88abce08c60c189f745e1156a36f97d8487062f3c50"} Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.924762 4664 scope.go:117] "RemoveContainer" containerID="782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.926028 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wl2pj" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.949226 4664 scope.go:117] "RemoveContainer" containerID="0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660" Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.977779 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:25 crc kubenswrapper[4664]: I1013 07:13:25.999389 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wl2pj"] Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.007130 4664 scope.go:117] "RemoveContainer" containerID="538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.037343 4664 scope.go:117] "RemoveContainer" containerID="782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b" Oct 13 07:13:26 crc kubenswrapper[4664]: E1013 07:13:26.037945 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b\": container with ID starting with 782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b not found: ID does not exist" containerID="782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.037983 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b"} err="failed to get container status \"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b\": rpc error: code = NotFound desc = could not find container \"782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b\": container with ID starting with 782c07b30e5a78657c70fd152955323ceea2160c087494d5fa5d88d514273f5b not found: ID does not exist" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.038004 4664 scope.go:117] "RemoveContainer" containerID="0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660" Oct 13 07:13:26 crc kubenswrapper[4664]: E1013 07:13:26.038453 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660\": container with ID starting with 0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660 not found: ID does not exist" containerID="0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.038480 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660"} err="failed to get container status \"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660\": rpc error: code = NotFound desc = could not find container \"0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660\": container with ID starting with 0c04f7652f46668b4a83e33f5c5f7742d3b944ca53ab41d6e94d92551ed5c660 not found: ID does not exist" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.038496 4664 scope.go:117] "RemoveContainer" containerID="538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286" Oct 13 07:13:26 crc kubenswrapper[4664]: E1013 07:13:26.040198 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286\": container with ID starting with 538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286 not found: ID does not exist" containerID="538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286" Oct 13 07:13:26 crc kubenswrapper[4664]: I1013 07:13:26.040230 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286"} err="failed to get container status \"538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286\": rpc error: code = NotFound desc = could not find container \"538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286\": container with ID starting with 538be7fe7234d59023d011c0f33f351b09e58e3b019aecc76c121f66ac7fd286 not found: ID does not exist" Oct 13 07:13:27 crc kubenswrapper[4664]: I1013 07:13:27.074263 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" path="/var/lib/kubelet/pods/198a76ea-c425-49d6-a4ea-3fda9e08c776/volumes" Oct 13 07:13:28 crc kubenswrapper[4664]: I1013 07:13:28.811745 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:13:28 crc kubenswrapper[4664]: I1013 07:13:28.811891 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:13:58 crc kubenswrapper[4664]: I1013 07:13:58.812151 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:13:58 crc kubenswrapper[4664]: I1013 07:13:58.812738 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:14:06 crc kubenswrapper[4664]: I1013 07:14:06.050020 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-qt7sf"] Oct 13 07:14:06 crc kubenswrapper[4664]: I1013 07:14:06.058095 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-qt7sf"] Oct 13 07:14:07 crc kubenswrapper[4664]: I1013 07:14:07.059021 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a1074e-9a30-4cb7-8278-60d4e8eaf9c7" path="/var/lib/kubelet/pods/57a1074e-9a30-4cb7-8278-60d4e8eaf9c7/volumes" Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.063610 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-nhrg9"] Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.065573 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/keystone-bootstrap-tvqs6"] Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.074609 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-nhrg9"] Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.082870 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-tvqs6"] Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.368185 4664 scope.go:117] "RemoveContainer" containerID="602abac067ce7d01f36bd018b9499cbae893c782189358d70f04166abee94494" Oct 13 07:14:17 crc kubenswrapper[4664]: I1013 07:14:17.393335 4664 scope.go:117] "RemoveContainer" containerID="d9d76b10d66b97d7dd0eff360b656d3dda15c232bcc7d475885e3a87efa557b6" Oct 13 07:14:19 crc kubenswrapper[4664]: I1013 07:14:19.030029 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-g6lvp"] Oct 13 07:14:19 crc kubenswrapper[4664]: I1013 07:14:19.040667 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-g6lvp"] Oct 13 07:14:19 crc kubenswrapper[4664]: I1013 07:14:19.058700 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3145c64c-4c70-4445-9ad2-bf492cb74e64" path="/var/lib/kubelet/pods/3145c64c-4c70-4445-9ad2-bf492cb74e64/volumes" Oct 13 07:14:19 crc kubenswrapper[4664]: I1013 07:14:19.060929 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="817edc64-6579-4cfd-97ab-705680d79119" path="/var/lib/kubelet/pods/817edc64-6579-4cfd-97ab-705680d79119/volumes" Oct 13 07:14:19 crc kubenswrapper[4664]: I1013 07:14:19.062126 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dac71586-090c-42d0-b9a2-9f53b4937c09" path="/var/lib/kubelet/pods/dac71586-090c-42d0-b9a2-9f53b4937c09/volumes" Oct 13 07:14:28 crc kubenswrapper[4664]: I1013 07:14:28.811561 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:14:28 crc kubenswrapper[4664]: I1013 07:14:28.812307 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:14:28 crc kubenswrapper[4664]: I1013 07:14:28.812361 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:14:28 crc kubenswrapper[4664]: I1013 07:14:28.813196 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:14:28 crc kubenswrapper[4664]: I1013 07:14:28.813253 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" 
gracePeriod=600 Oct 13 07:14:28 crc kubenswrapper[4664]: E1013 07:14:28.948670 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:14:29 crc kubenswrapper[4664]: I1013 07:14:29.558011 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" exitCode=0 Oct 13 07:14:29 crc kubenswrapper[4664]: I1013 07:14:29.558073 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"} Oct 13 07:14:29 crc kubenswrapper[4664]: I1013 07:14:29.558119 4664 scope.go:117] "RemoveContainer" containerID="d5f2cde2ed697e7f4fec7fa8bcc15fd54a4ffa34386ca0974f63cbbf3e4882ad" Oct 13 07:14:29 crc kubenswrapper[4664]: I1013 07:14:29.558907 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:14:29 crc kubenswrapper[4664]: E1013 07:14:29.559306 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:14:32 crc kubenswrapper[4664]: I1013 07:14:32.049687 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-7nsnc"] Oct 13 07:14:32 crc kubenswrapper[4664]: I1013 07:14:32.065520 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-2x2fr"] Oct 13 07:14:32 crc kubenswrapper[4664]: I1013 07:14:32.079090 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-2x2fr"] Oct 13 07:14:32 crc kubenswrapper[4664]: I1013 07:14:32.087027 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-7nsnc"] Oct 13 07:14:33 crc kubenswrapper[4664]: I1013 07:14:33.028378 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-jbdfp"] Oct 13 07:14:33 crc kubenswrapper[4664]: I1013 07:14:33.039386 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-jbdfp"] Oct 13 07:14:33 crc kubenswrapper[4664]: I1013 07:14:33.065623 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b2b826f-7b2a-4de6-9f80-7c854b988a67" path="/var/lib/kubelet/pods/0b2b826f-7b2a-4de6-9f80-7c854b988a67/volumes" Oct 13 07:14:33 crc kubenswrapper[4664]: I1013 07:14:33.069098 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ca51385-fe74-4d5b-a542-f33734fb8e46" path="/var/lib/kubelet/pods/0ca51385-fe74-4d5b-a542-f33734fb8e46/volumes" Oct 13 07:14:33 crc kubenswrapper[4664]: I1013 07:14:33.070487 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ad23ea69-3e65-4f4c-afdc-21abded4e19c" path="/var/lib/kubelet/pods/ad23ea69-3e65-4f4c-afdc-21abded4e19c/volumes" Oct 13 07:14:44 crc kubenswrapper[4664]: I1013 07:14:44.046864 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:14:44 crc kubenswrapper[4664]: E1013 07:14:44.047576 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:14:59 crc kubenswrapper[4664]: I1013 07:14:59.047773 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:14:59 crc kubenswrapper[4664]: E1013 07:14:59.048988 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.159978 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx"] Oct 13 07:15:00 crc kubenswrapper[4664]: E1013 07:15:00.160671 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="extract-content" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.160688 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="extract-content" Oct 13 07:15:00 crc kubenswrapper[4664]: E1013 07:15:00.160712 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="registry-server" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.160717 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="registry-server" Oct 13 07:15:00 crc kubenswrapper[4664]: E1013 07:15:00.160730 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="extract-utilities" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.160737 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="extract-utilities" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.160943 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="198a76ea-c425-49d6-a4ea-3fda9e08c776" containerName="registry-server" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.161557 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.163952 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.165463 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.194595 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx"] Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.286940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.286992 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx82f\" (UniqueName: \"kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.287071 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.388858 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.388925 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx82f\" (UniqueName: \"kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.388969 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.390033 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume\") pod 
\"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.395469 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.413766 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx82f\" (UniqueName: \"kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f\") pod \"collect-profiles-29338995-9wbwx\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.495315 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:00 crc kubenswrapper[4664]: I1013 07:15:00.968886 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx"] Oct 13 07:15:01 crc kubenswrapper[4664]: I1013 07:15:01.861330 4664 generic.go:334] "Generic (PLEG): container finished" podID="ac51715e-8507-4676-89b6-f76b6419e7d1" containerID="2836ade4fdbf931d7f75665c43b584016de43038d3f90bd67a7e422e7a989634" exitCode=0 Oct 13 07:15:01 crc kubenswrapper[4664]: I1013 07:15:01.861448 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" event={"ID":"ac51715e-8507-4676-89b6-f76b6419e7d1","Type":"ContainerDied","Data":"2836ade4fdbf931d7f75665c43b584016de43038d3f90bd67a7e422e7a989634"} Oct 13 07:15:01 crc kubenswrapper[4664]: I1013 07:15:01.861577 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" event={"ID":"ac51715e-8507-4676-89b6-f76b6419e7d1","Type":"ContainerStarted","Data":"25dc6fd33bffb4545d0f6a4761cd799000d1429da37157e0f78c5fbb91d26cee"} Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.217204 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.246821 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume\") pod \"ac51715e-8507-4676-89b6-f76b6419e7d1\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.246900 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume\") pod \"ac51715e-8507-4676-89b6-f76b6419e7d1\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.247100 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx82f\" (UniqueName: \"kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f\") pod \"ac51715e-8507-4676-89b6-f76b6419e7d1\" (UID: \"ac51715e-8507-4676-89b6-f76b6419e7d1\") " Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.249051 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume" (OuterVolumeSpecName: "config-volume") pod "ac51715e-8507-4676-89b6-f76b6419e7d1" (UID: "ac51715e-8507-4676-89b6-f76b6419e7d1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.253922 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ac51715e-8507-4676-89b6-f76b6419e7d1" (UID: "ac51715e-8507-4676-89b6-f76b6419e7d1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.255254 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f" (OuterVolumeSpecName: "kube-api-access-mx82f") pod "ac51715e-8507-4676-89b6-f76b6419e7d1" (UID: "ac51715e-8507-4676-89b6-f76b6419e7d1"). InnerVolumeSpecName "kube-api-access-mx82f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.349840 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac51715e-8507-4676-89b6-f76b6419e7d1-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.350176 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac51715e-8507-4676-89b6-f76b6419e7d1-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.350189 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx82f\" (UniqueName: \"kubernetes.io/projected/ac51715e-8507-4676-89b6-f76b6419e7d1-kube-api-access-mx82f\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.887577 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" event={"ID":"ac51715e-8507-4676-89b6-f76b6419e7d1","Type":"ContainerDied","Data":"25dc6fd33bffb4545d0f6a4761cd799000d1429da37157e0f78c5fbb91d26cee"} Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.887627 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25dc6fd33bffb4545d0f6a4761cd799000d1429da37157e0f78c5fbb91d26cee" Oct 13 07:15:03 crc kubenswrapper[4664]: I1013 07:15:03.887646 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx" Oct 13 07:15:09 crc kubenswrapper[4664]: I1013 07:15:09.953105 4664 generic.go:334] "Generic (PLEG): container finished" podID="fd7254f1-5310-490d-bf9d-ad81da0a7fb1" containerID="9d15a394b1540d6737ab350f2c695c4c21690233facfc7358f3b7ff1d5b91874" exitCode=0 Oct 13 07:15:09 crc kubenswrapper[4664]: I1013 07:15:09.953203 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" event={"ID":"fd7254f1-5310-490d-bf9d-ad81da0a7fb1","Type":"ContainerDied","Data":"9d15a394b1540d6737ab350f2c695c4c21690233facfc7358f3b7ff1d5b91874"} Oct 13 07:15:10 crc kubenswrapper[4664]: I1013 07:15:10.047631 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:15:10 crc kubenswrapper[4664]: E1013 07:15:10.047943 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.422916 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.549208 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tqq5\" (UniqueName: \"kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5\") pod \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.549338 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key\") pod \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.549383 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory\") pod \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\" (UID: \"fd7254f1-5310-490d-bf9d-ad81da0a7fb1\") " Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.555005 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5" (OuterVolumeSpecName: "kube-api-access-7tqq5") pod "fd7254f1-5310-490d-bf9d-ad81da0a7fb1" (UID: "fd7254f1-5310-490d-bf9d-ad81da0a7fb1"). InnerVolumeSpecName "kube-api-access-7tqq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.582574 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fd7254f1-5310-490d-bf9d-ad81da0a7fb1" (UID: "fd7254f1-5310-490d-bf9d-ad81da0a7fb1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.598722 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory" (OuterVolumeSpecName: "inventory") pod "fd7254f1-5310-490d-bf9d-ad81da0a7fb1" (UID: "fd7254f1-5310-490d-bf9d-ad81da0a7fb1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.652926 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tqq5\" (UniqueName: \"kubernetes.io/projected/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-kube-api-access-7tqq5\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.652958 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.652967 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd7254f1-5310-490d-bf9d-ad81da0a7fb1-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.983839 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" event={"ID":"fd7254f1-5310-490d-bf9d-ad81da0a7fb1","Type":"ContainerDied","Data":"cd134be119aa8ab2908327c400151ad74069d6a03c29396555c5959d31dad0c9"} Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.983891 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd134be119aa8ab2908327c400151ad74069d6a03c29396555c5959d31dad0c9" Oct 13 07:15:11 crc kubenswrapper[4664]: I1013 07:15:11.983937 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-t56l2" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.106339 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts"] Oct 13 07:15:12 crc kubenswrapper[4664]: E1013 07:15:12.106903 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd7254f1-5310-490d-bf9d-ad81da0a7fb1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.106929 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd7254f1-5310-490d-bf9d-ad81da0a7fb1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 13 07:15:12 crc kubenswrapper[4664]: E1013 07:15:12.106948 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac51715e-8507-4676-89b6-f76b6419e7d1" containerName="collect-profiles" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.106959 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac51715e-8507-4676-89b6-f76b6419e7d1" containerName="collect-profiles" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.107220 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd7254f1-5310-490d-bf9d-ad81da0a7fb1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.107250 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac51715e-8507-4676-89b6-f76b6419e7d1" containerName="collect-profiles" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.108837 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.112169 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.112429 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.112545 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.117314 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.122164 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts"] Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.176751 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.177028 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.177749 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5vk5\" (UniqueName: \"kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.279668 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.279878 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.280001 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5vk5\" (UniqueName: \"kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.284659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.285974 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.297560 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5vk5\" (UniqueName: \"kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-rstts\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:12 crc kubenswrapper[4664]: I1013 07:15:12.433754 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" Oct 13 07:15:13 crc kubenswrapper[4664]: I1013 07:15:13.038172 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts"] Oct 13 07:15:14 crc kubenswrapper[4664]: I1013 07:15:14.007321 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" event={"ID":"a0c2c2ed-c303-4f67-b8fc-fb13f3218090","Type":"ContainerStarted","Data":"467fbc71ba1ac16917711d24e599140e717be7de8e9afe4bec9b02e10d127024"} Oct 13 07:15:14 crc kubenswrapper[4664]: I1013 07:15:14.008958 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" event={"ID":"a0c2c2ed-c303-4f67-b8fc-fb13f3218090","Type":"ContainerStarted","Data":"68bdb98fb3babe2eafb5d6dbb887222e0d11cac7094dbb7031cb99270927a58e"} Oct 13 07:15:14 crc kubenswrapper[4664]: I1013 07:15:14.037068 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" podStartSLOduration=1.494585714 podStartE2EDuration="2.037040647s" podCreationTimestamp="2025-10-13 07:15:12 +0000 UTC" firstStartedPulling="2025-10-13 07:15:13.054413559 +0000 UTC m=+1720.741858751" lastFinishedPulling="2025-10-13 07:15:13.596868452 +0000 UTC m=+1721.284313684" observedRunningTime="2025-10-13 07:15:14.033104422 +0000 UTC m=+1721.720549644" watchObservedRunningTime="2025-10-13 07:15:14.037040647 +0000 UTC m=+1721.724485869" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.526184 4664 scope.go:117] "RemoveContainer" containerID="cbbbe3b2a274195ab47e30f3b223cc94f88a8d640fa6fef4401543959fba725e" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.572638 4664 scope.go:117] "RemoveContainer" 
containerID="f742c5133839c6b2697a028bedaa5b2c6a999aa584c199191c899bcc0e844579" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.640376 4664 scope.go:117] "RemoveContainer" containerID="f03513597d8fbc31e4596fc7b220501859710f528c0d4d3caefa0436ed455c97" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.692136 4664 scope.go:117] "RemoveContainer" containerID="e3db5d83c1b6440ebe159e708aa3bd5086266878c5e5e5c6a2f61fa012f97ab6" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.748982 4664 scope.go:117] "RemoveContainer" containerID="15ec08e83e05d18d28ea41c99c390166025a444fb13a4b6aaa2fb20877fb68ae" Oct 13 07:15:17 crc kubenswrapper[4664]: I1013 07:15:17.799387 4664 scope.go:117] "RemoveContainer" containerID="d1353fb72661a2534866aab934397eb5447bac08f8e74ce3079d39e72631348f" Oct 13 07:15:19 crc kubenswrapper[4664]: I1013 07:15:19.060395 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-tdsgr"] Oct 13 07:15:19 crc kubenswrapper[4664]: I1013 07:15:19.064592 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-tdsgr"] Oct 13 07:15:20 crc kubenswrapper[4664]: I1013 07:15:20.055492 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-jw2p6"] Oct 13 07:15:20 crc kubenswrapper[4664]: I1013 07:15:20.096882 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-dzd5z"] Oct 13 07:15:20 crc kubenswrapper[4664]: I1013 07:15:20.114835 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-jw2p6"] Oct 13 07:15:20 crc kubenswrapper[4664]: I1013 07:15:20.126717 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-dzd5z"] Oct 13 07:15:21 crc kubenswrapper[4664]: I1013 07:15:21.058135 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20839d87-6d9f-4137-a447-9da5d1523e9c" path="/var/lib/kubelet/pods/20839d87-6d9f-4137-a447-9da5d1523e9c/volumes" Oct 13 07:15:21 crc kubenswrapper[4664]: I1013 07:15:21.059544 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="417f8a42-5f63-4637-9644-ebb89537f1be" path="/var/lib/kubelet/pods/417f8a42-5f63-4637-9644-ebb89537f1be/volumes" Oct 13 07:15:21 crc kubenswrapper[4664]: I1013 07:15:21.060268 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e" path="/var/lib/kubelet/pods/bb7d9e6d-c2cb-4dff-a6ae-4c53cc6ffb4e/volumes" Oct 13 07:15:24 crc kubenswrapper[4664]: I1013 07:15:24.047654 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:15:24 crc kubenswrapper[4664]: E1013 07:15:24.048456 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:15:32 crc kubenswrapper[4664]: I1013 07:15:32.044520 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-a1f7-account-create-khkpz"] Oct 13 07:15:32 crc kubenswrapper[4664]: I1013 07:15:32.065211 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-9c3b-account-create-5kwlq"] Oct 13 07:15:32 crc 
kubenswrapper[4664]: I1013 07:15:32.075078 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-a1f7-account-create-khkpz"] Oct 13 07:15:32 crc kubenswrapper[4664]: I1013 07:15:32.084073 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a819-account-create-25fxh"] Oct 13 07:15:32 crc kubenswrapper[4664]: I1013 07:15:32.093676 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-9c3b-account-create-5kwlq"] Oct 13 07:15:32 crc kubenswrapper[4664]: I1013 07:15:32.101496 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a819-account-create-25fxh"] Oct 13 07:15:33 crc kubenswrapper[4664]: I1013 07:15:33.087547 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00359356-4b01-4fe2-9818-1aee78df254c" path="/var/lib/kubelet/pods/00359356-4b01-4fe2-9818-1aee78df254c/volumes" Oct 13 07:15:33 crc kubenswrapper[4664]: I1013 07:15:33.093237 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d38225c-2fec-4695-8e19-6518db14c972" path="/var/lib/kubelet/pods/7d38225c-2fec-4695-8e19-6518db14c972/volumes" Oct 13 07:15:33 crc kubenswrapper[4664]: I1013 07:15:33.094242 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3cda89c-8f77-40c1-9333-fb92c9e78f02" path="/var/lib/kubelet/pods/e3cda89c-8f77-40c1-9333-fb92c9e78f02/volumes" Oct 13 07:15:39 crc kubenswrapper[4664]: I1013 07:15:39.047166 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:15:39 crc kubenswrapper[4664]: E1013 07:15:39.047958 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:15:50 crc kubenswrapper[4664]: I1013 07:15:50.046989 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:15:50 crc kubenswrapper[4664]: E1013 07:15:50.048216 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:16:04 crc kubenswrapper[4664]: I1013 07:16:04.047198 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:16:04 crc kubenswrapper[4664]: E1013 07:16:04.048247 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:16:09 crc kubenswrapper[4664]: I1013 07:16:09.044410 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell0-conductor-db-sync-8bkjv"] Oct 13 07:16:09 crc kubenswrapper[4664]: I1013 07:16:09.061149 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8bkjv"] Oct 13 07:16:11 crc kubenswrapper[4664]: I1013 07:16:11.062284 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d138f3a-fa9c-4ae4-873e-a8c335b0635d" path="/var/lib/kubelet/pods/6d138f3a-fa9c-4ae4-873e-a8c335b0635d/volumes" Oct 13 07:16:15 crc kubenswrapper[4664]: I1013 07:16:15.047885 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:16:15 crc kubenswrapper[4664]: E1013 07:16:15.048715 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:16:17 crc kubenswrapper[4664]: I1013 07:16:17.956430 4664 scope.go:117] "RemoveContainer" containerID="b6ad04a824ab2ad05f5648116600fd947c44b327a9a629b900eaab41bb9dddee" Oct 13 07:16:17 crc kubenswrapper[4664]: I1013 07:16:17.984100 4664 scope.go:117] "RemoveContainer" containerID="65822424319ed6996a7179ceb3ff1a384c1b7298deb08fe7c822bf57ff34ce99" Oct 13 07:16:18 crc kubenswrapper[4664]: I1013 07:16:18.038148 4664 scope.go:117] "RemoveContainer" containerID="fcb0cacd9b6c27413c44f3f5e9f159eef541d821f381b60a10437e83dc8886f7" Oct 13 07:16:18 crc kubenswrapper[4664]: I1013 07:16:18.081979 4664 scope.go:117] "RemoveContainer" containerID="26d329577c21c19446751da68a7af1193fe877154655e89822ddb6890b610a34" Oct 13 07:16:18 crc kubenswrapper[4664]: I1013 07:16:18.128257 4664 scope.go:117] "RemoveContainer" containerID="362a329a787f3cfb802401f618eda0ad9ee4f1d21430a7dca4c88a1bc01b5706" Oct 13 07:16:18 crc kubenswrapper[4664]: I1013 07:16:18.177749 4664 scope.go:117] "RemoveContainer" containerID="559ef0765609e67603d1204a8d3c67f61fa2b22d2d33ae2350297a7222c9258b" Oct 13 07:16:18 crc kubenswrapper[4664]: I1013 07:16:18.233592 4664 scope.go:117] "RemoveContainer" containerID="f3add747053c2305d656a0f13010a26e53b8bfeea8f17c2ef3ce8e2742fa3780" Oct 13 07:16:29 crc kubenswrapper[4664]: I1013 07:16:29.778878 4664 generic.go:334] "Generic (PLEG): container finished" podID="a0c2c2ed-c303-4f67-b8fc-fb13f3218090" containerID="467fbc71ba1ac16917711d24e599140e717be7de8e9afe4bec9b02e10d127024" exitCode=0 Oct 13 07:16:29 crc kubenswrapper[4664]: I1013 07:16:29.779064 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" event={"ID":"a0c2c2ed-c303-4f67-b8fc-fb13f3218090","Type":"ContainerDied","Data":"467fbc71ba1ac16917711d24e599140e717be7de8e9afe4bec9b02e10d127024"} Oct 13 07:16:30 crc kubenswrapper[4664]: I1013 07:16:30.046811 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:16:30 crc kubenswrapper[4664]: E1013 07:16:30.047075 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.225964 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.246003 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key\") pod \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") "
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.246099 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5vk5\" (UniqueName: \"kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5\") pod \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") "
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.246135 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory\") pod \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\" (UID: \"a0c2c2ed-c303-4f67-b8fc-fb13f3218090\") "
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.251872 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5" (OuterVolumeSpecName: "kube-api-access-c5vk5") pod "a0c2c2ed-c303-4f67-b8fc-fb13f3218090" (UID: "a0c2c2ed-c303-4f67-b8fc-fb13f3218090"). InnerVolumeSpecName "kube-api-access-c5vk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.276917 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory" (OuterVolumeSpecName: "inventory") pod "a0c2c2ed-c303-4f67-b8fc-fb13f3218090" (UID: "a0c2c2ed-c303-4f67-b8fc-fb13f3218090"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.289490 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a0c2c2ed-c303-4f67-b8fc-fb13f3218090" (UID: "a0c2c2ed-c303-4f67-b8fc-fb13f3218090"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.347169 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.347205 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5vk5\" (UniqueName: \"kubernetes.io/projected/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-kube-api-access-c5vk5\") on node \"crc\" DevicePath \"\""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.347218 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c2c2ed-c303-4f67-b8fc-fb13f3218090-inventory\") on node \"crc\" DevicePath \"\""
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.798379 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts" event={"ID":"a0c2c2ed-c303-4f67-b8fc-fb13f3218090","Type":"ContainerDied","Data":"68bdb98fb3babe2eafb5d6dbb887222e0d11cac7094dbb7031cb99270927a58e"}
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.798706 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68bdb98fb3babe2eafb5d6dbb887222e0d11cac7094dbb7031cb99270927a58e"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.798624 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-rstts"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.907624 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"]
Oct 13 07:16:31 crc kubenswrapper[4664]: E1013 07:16:31.908145 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0c2c2ed-c303-4f67-b8fc-fb13f3218090" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.908173 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0c2c2ed-c303-4f67-b8fc-fb13f3218090" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.908486 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0c2c2ed-c303-4f67-b8fc-fb13f3218090" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.909432 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.911729 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.912334 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.917827 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.918177 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.928767 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"]
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.958166 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.958452 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x6g8\" (UniqueName: \"kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:31 crc kubenswrapper[4664]: I1013 07:16:31.958667 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.061019 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.061140 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.061297 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x6g8\" (UniqueName: \"kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"
\"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.066370 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.077364 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.086293 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x6g8\" (UniqueName: \"kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.232006 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.770228 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn"] Oct 13 07:16:32 crc kubenswrapper[4664]: I1013 07:16:32.809433 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" event={"ID":"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979","Type":"ContainerStarted","Data":"d095c7bd0e08eb5b61f11871389ef4dbb138d6a42ec5ebbaa0082b3845609403"} Oct 13 07:16:33 crc kubenswrapper[4664]: I1013 07:16:33.324773 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:16:33 crc kubenswrapper[4664]: I1013 07:16:33.819257 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" event={"ID":"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979","Type":"ContainerStarted","Data":"a6cf3ed82822e2892052f5ff5d5187bc63b658a2a08b1473772ebfa74a78ba3b"} Oct 13 07:16:33 crc kubenswrapper[4664]: I1013 07:16:33.848052 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" podStartSLOduration=2.300029175 podStartE2EDuration="2.848025418s" podCreationTimestamp="2025-10-13 07:16:31 +0000 UTC" firstStartedPulling="2025-10-13 07:16:32.773341383 +0000 UTC m=+1800.460786575" lastFinishedPulling="2025-10-13 07:16:33.321337606 +0000 UTC m=+1801.008782818" observedRunningTime="2025-10-13 07:16:33.83611386 +0000 UTC m=+1801.523559062" watchObservedRunningTime="2025-10-13 07:16:33.848025418 +0000 UTC m=+1801.535470620" Oct 13 07:16:38 crc kubenswrapper[4664]: I1013 07:16:38.863532 4664 generic.go:334] "Generic (PLEG): container finished" 
podID="2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" containerID="a6cf3ed82822e2892052f5ff5d5187bc63b658a2a08b1473772ebfa74a78ba3b" exitCode=0 Oct 13 07:16:38 crc kubenswrapper[4664]: I1013 07:16:38.863639 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" event={"ID":"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979","Type":"ContainerDied","Data":"a6cf3ed82822e2892052f5ff5d5187bc63b658a2a08b1473772ebfa74a78ba3b"} Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.074513 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-2fnxr"] Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.085726 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-2fnxr"] Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.295537 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.333712 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key\") pod \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.333903 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x6g8\" (UniqueName: \"kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8\") pod \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.333964 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory\") pod \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\" (UID: \"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979\") " Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.342672 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8" (OuterVolumeSpecName: "kube-api-access-4x6g8") pod "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" (UID: "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979"). InnerVolumeSpecName "kube-api-access-4x6g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.365691 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" (UID: "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.368451 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory" (OuterVolumeSpecName: "inventory") pod "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" (UID: "2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.436760 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.436791 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x6g8\" (UniqueName: \"kubernetes.io/projected/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-kube-api-access-4x6g8\") on node \"crc\" DevicePath \"\"" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.436828 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.883284 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" event={"ID":"2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979","Type":"ContainerDied","Data":"d095c7bd0e08eb5b61f11871389ef4dbb138d6a42ec5ebbaa0082b3845609403"} Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.883318 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d095c7bd0e08eb5b61f11871389ef4dbb138d6a42ec5ebbaa0082b3845609403" Oct 13 07:16:40 crc kubenswrapper[4664]: I1013 07:16:40.883318 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.021270 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6"] Oct 13 07:16:41 crc kubenswrapper[4664]: E1013 07:16:41.021651 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.021668 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.021890 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.022695 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.027223 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.027241 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.027256 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.031174 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.036110 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6"] Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.070992 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2b63677-e86e-4de3-8091-1c36dc8fab29" path="/var/lib/kubelet/pods/b2b63677-e86e-4de3-8091-1c36dc8fab29/volumes" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.163002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6pc5\" (UniqueName: \"kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.164037 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.164446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.266949 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.267231 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6pc5\" (UniqueName: \"kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.267326 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.271074 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.271765 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.284532 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6pc5\" (UniqueName: \"kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sdqq6\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.341955 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:16:41 crc kubenswrapper[4664]: I1013 07:16:41.895729 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6"] Oct 13 07:16:41 crc kubenswrapper[4664]: W1013 07:16:41.904025 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda92d9f52_f23c_48e8_a74a_cc9b68d5c362.slice/crio-402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff WatchSource:0}: Error finding container 402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff: Status 404 returned error can't find the container with id 402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff Oct 13 07:16:42 crc kubenswrapper[4664]: I1013 07:16:42.904424 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" event={"ID":"a92d9f52-f23c-48e8-a74a-cc9b68d5c362","Type":"ContainerStarted","Data":"d8a652915c24ed1988acce7c9e74c6abc0b35252374a8f549cf143ae248ec1c4"} Oct 13 07:16:42 crc kubenswrapper[4664]: I1013 07:16:42.904878 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" event={"ID":"a92d9f52-f23c-48e8-a74a-cc9b68d5c362","Type":"ContainerStarted","Data":"402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff"} Oct 13 07:16:42 crc kubenswrapper[4664]: I1013 07:16:42.937097 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" podStartSLOduration=2.408952228 podStartE2EDuration="2.937074888s" podCreationTimestamp="2025-10-13 
07:16:40 +0000 UTC" firstStartedPulling="2025-10-13 07:16:41.908333503 +0000 UTC m=+1809.595778705" lastFinishedPulling="2025-10-13 07:16:42.436456153 +0000 UTC m=+1810.123901365" observedRunningTime="2025-10-13 07:16:42.925728424 +0000 UTC m=+1810.613173646" watchObservedRunningTime="2025-10-13 07:16:42.937074888 +0000 UTC m=+1810.624520120" Oct 13 07:16:43 crc kubenswrapper[4664]: I1013 07:16:43.051699 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:16:43 crc kubenswrapper[4664]: E1013 07:16:43.051947 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:16:55 crc kubenswrapper[4664]: I1013 07:16:55.059094 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-q9n4w"] Oct 13 07:16:55 crc kubenswrapper[4664]: I1013 07:16:55.059817 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-q9n4w"] Oct 13 07:16:56 crc kubenswrapper[4664]: I1013 07:16:56.046351 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:16:56 crc kubenswrapper[4664]: E1013 07:16:56.046761 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:16:57 crc kubenswrapper[4664]: I1013 07:16:57.067849 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b3061d9-9611-4c41-af2e-5c978dc8032c" path="/var/lib/kubelet/pods/7b3061d9-9611-4c41-af2e-5c978dc8032c/volumes" Oct 13 07:17:08 crc kubenswrapper[4664]: I1013 07:17:08.047373 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:17:08 crc kubenswrapper[4664]: E1013 07:17:08.048163 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:17:18 crc kubenswrapper[4664]: I1013 07:17:18.352157 4664 scope.go:117] "RemoveContainer" containerID="1263f9aab96e6a4f8c058ebe5805cd59868dea520ffee7fc4ac711c665860314" Oct 13 07:17:18 crc kubenswrapper[4664]: I1013 07:17:18.425605 4664 scope.go:117] "RemoveContainer" containerID="528c03dfb526f85e0949f7bd1aa1def2b8e23859b10e0221f02932b38a3e81a6" Oct 13 07:17:20 crc kubenswrapper[4664]: I1013 07:17:20.047198 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:17:20 crc kubenswrapper[4664]: E1013 07:17:20.047668 
4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:17:21 crc kubenswrapper[4664]: I1013 07:17:21.038221 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-9qtdr"] Oct 13 07:17:21 crc kubenswrapper[4664]: I1013 07:17:21.060515 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-9qtdr"] Oct 13 07:17:23 crc kubenswrapper[4664]: I1013 07:17:23.059595 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98cb4b7a-e3b0-496a-8b4c-71c78b879e44" path="/var/lib/kubelet/pods/98cb4b7a-e3b0-496a-8b4c-71c78b879e44/volumes" Oct 13 07:17:23 crc kubenswrapper[4664]: I1013 07:17:23.268187 4664 generic.go:334] "Generic (PLEG): container finished" podID="a92d9f52-f23c-48e8-a74a-cc9b68d5c362" containerID="d8a652915c24ed1988acce7c9e74c6abc0b35252374a8f549cf143ae248ec1c4" exitCode=0 Oct 13 07:17:23 crc kubenswrapper[4664]: I1013 07:17:23.268238 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" event={"ID":"a92d9f52-f23c-48e8-a74a-cc9b68d5c362","Type":"ContainerDied","Data":"d8a652915c24ed1988acce7c9e74c6abc0b35252374a8f549cf143ae248ec1c4"} Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.669827 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.718975 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6pc5\" (UniqueName: \"kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5\") pod \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.719075 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory\") pod \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.719288 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key\") pod \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\" (UID: \"a92d9f52-f23c-48e8-a74a-cc9b68d5c362\") " Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.726972 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5" (OuterVolumeSpecName: "kube-api-access-v6pc5") pod "a92d9f52-f23c-48e8-a74a-cc9b68d5c362" (UID: "a92d9f52-f23c-48e8-a74a-cc9b68d5c362"). InnerVolumeSpecName "kube-api-access-v6pc5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.746675 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory" (OuterVolumeSpecName: "inventory") pod "a92d9f52-f23c-48e8-a74a-cc9b68d5c362" (UID: "a92d9f52-f23c-48e8-a74a-cc9b68d5c362"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.750829 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a92d9f52-f23c-48e8-a74a-cc9b68d5c362" (UID: "a92d9f52-f23c-48e8-a74a-cc9b68d5c362"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.822281 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.822342 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6pc5\" (UniqueName: \"kubernetes.io/projected/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-kube-api-access-v6pc5\") on node \"crc\" DevicePath \"\"" Oct 13 07:17:24 crc kubenswrapper[4664]: I1013 07:17:24.822373 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a92d9f52-f23c-48e8-a74a-cc9b68d5c362-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.310403 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" event={"ID":"a92d9f52-f23c-48e8-a74a-cc9b68d5c362","Type":"ContainerDied","Data":"402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff"} Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.310654 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="402878c9ecc2d71d315aadda614434ccc2550fcdd42f12115581103018a105ff" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.310539 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sdqq6" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.395947 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z"] Oct 13 07:17:25 crc kubenswrapper[4664]: E1013 07:17:25.396387 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92d9f52-f23c-48e8-a74a-cc9b68d5c362" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.396409 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92d9f52-f23c-48e8-a74a-cc9b68d5c362" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.396581 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a92d9f52-f23c-48e8-a74a-cc9b68d5c362" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.397328 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.399245 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.399500 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.399620 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.403086 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.417351 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z"] Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.536839 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws4cd\" (UniqueName: \"kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.536932 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.537298 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.639231 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.639328 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws4cd\" (UniqueName: \"kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.639379 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" 
(UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.646010 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.656640 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.660999 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws4cd\" (UniqueName: \"kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-txl6z\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:25 crc kubenswrapper[4664]: I1013 07:17:25.718690 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" Oct 13 07:17:26 crc kubenswrapper[4664]: I1013 07:17:26.241898 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z"] Oct 13 07:17:26 crc kubenswrapper[4664]: I1013 07:17:26.323760 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" event={"ID":"776354d2-5430-4d54-b3d2-4d7f1880f8bf","Type":"ContainerStarted","Data":"7227e206920454ff6bac33a08715e3840a3bb55015ef3ae76530274cb0674b31"} Oct 13 07:17:27 crc kubenswrapper[4664]: I1013 07:17:27.343255 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" event={"ID":"776354d2-5430-4d54-b3d2-4d7f1880f8bf","Type":"ContainerStarted","Data":"5b051d4e686ba59991a2d802167eeced41ab67a9bf6e4be4618af0968955b3dd"} Oct 13 07:17:27 crc kubenswrapper[4664]: I1013 07:17:27.386114 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" podStartSLOduration=1.9134983330000002 podStartE2EDuration="2.386090307s" podCreationTimestamp="2025-10-13 07:17:25 +0000 UTC" firstStartedPulling="2025-10-13 07:17:26.246854285 +0000 UTC m=+1853.934299477" lastFinishedPulling="2025-10-13 07:17:26.719446249 +0000 UTC m=+1854.406891451" observedRunningTime="2025-10-13 07:17:27.365767403 +0000 UTC m=+1855.053212605" watchObservedRunningTime="2025-10-13 07:17:27.386090307 +0000 UTC m=+1855.073535509" Oct 13 07:17:33 crc kubenswrapper[4664]: I1013 07:17:33.054703 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:17:33 crc kubenswrapper[4664]: E1013 07:17:33.055528 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
Oct 13 07:17:48 crc kubenswrapper[4664]: I1013 07:17:48.047933 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"
Oct 13 07:17:48 crc kubenswrapper[4664]: E1013 07:17:48.049254 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:18:00 crc kubenswrapper[4664]: I1013 07:18:00.047751 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"
Oct 13 07:18:00 crc kubenswrapper[4664]: E1013 07:18:00.049371 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:18:13 crc kubenswrapper[4664]: I1013 07:18:13.053707 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"
Oct 13 07:18:13 crc kubenswrapper[4664]: E1013 07:18:13.054545 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:18:18 crc kubenswrapper[4664]: I1013 07:18:18.496522 4664 scope.go:117] "RemoveContainer" containerID="5bed13129d9f588d4806076dd61b7882f0673a7c3684bacdf19306bd9e51287e"
Oct 13 07:18:20 crc kubenswrapper[4664]: I1013 07:18:20.854545 4664 generic.go:334] "Generic (PLEG): container finished" podID="776354d2-5430-4d54-b3d2-4d7f1880f8bf" containerID="5b051d4e686ba59991a2d802167eeced41ab67a9bf6e4be4618af0968955b3dd" exitCode=2
Oct 13 07:18:20 crc kubenswrapper[4664]: I1013 07:18:20.854853 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" event={"ID":"776354d2-5430-4d54-b3d2-4d7f1880f8bf","Type":"ContainerDied","Data":"5b051d4e686ba59991a2d802167eeced41ab67a9bf6e4be4618af0968955b3dd"}
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.284268 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z"
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.362210 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory\") pod \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") "
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.362262 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key\") pod \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") "
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.362371 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws4cd\" (UniqueName: \"kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd\") pod \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\" (UID: \"776354d2-5430-4d54-b3d2-4d7f1880f8bf\") "
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.368332 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd" (OuterVolumeSpecName: "kube-api-access-ws4cd") pod "776354d2-5430-4d54-b3d2-4d7f1880f8bf" (UID: "776354d2-5430-4d54-b3d2-4d7f1880f8bf"). InnerVolumeSpecName "kube-api-access-ws4cd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.412715 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory" (OuterVolumeSpecName: "inventory") pod "776354d2-5430-4d54-b3d2-4d7f1880f8bf" (UID: "776354d2-5430-4d54-b3d2-4d7f1880f8bf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.424104 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "776354d2-5430-4d54-b3d2-4d7f1880f8bf" (UID: "776354d2-5430-4d54-b3d2-4d7f1880f8bf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.464898 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.464944 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/776354d2-5430-4d54-b3d2-4d7f1880f8bf-inventory\") on node \"crc\" DevicePath \"\""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.464960 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws4cd\" (UniqueName: \"kubernetes.io/projected/776354d2-5430-4d54-b3d2-4d7f1880f8bf-kube-api-access-ws4cd\") on node \"crc\" DevicePath \"\""
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.872684 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z" event={"ID":"776354d2-5430-4d54-b3d2-4d7f1880f8bf","Type":"ContainerDied","Data":"7227e206920454ff6bac33a08715e3840a3bb55015ef3ae76530274cb0674b31"}
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.872728 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7227e206920454ff6bac33a08715e3840a3bb55015ef3ae76530274cb0674b31"
Oct 13 07:18:22 crc kubenswrapper[4664]: I1013 07:18:22.872746 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-txl6z"
Oct 13 07:18:24 crc kubenswrapper[4664]: I1013 07:18:24.047578 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5"
Oct 13 07:18:24 crc kubenswrapper[4664]: E1013 07:18:24.048539 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.029911 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r"]
Oct 13 07:18:30 crc kubenswrapper[4664]: E1013 07:18:30.031642 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776354d2-5430-4d54-b3d2-4d7f1880f8bf" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.031726 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="776354d2-5430-4d54-b3d2-4d7f1880f8bf" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.031989 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="776354d2-5430-4d54-b3d2-4d7f1880f8bf" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.032697 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r"
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.037871 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.037875 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.038165 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.040240 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.064117 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r"] Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.210276 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clcgv\" (UniqueName: \"kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.210313 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.211169 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.313245 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clcgv\" (UniqueName: \"kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.313286 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.313341 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" 
(UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.318954 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.322477 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.335055 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clcgv\" (UniqueName: \"kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.356275 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.902062 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r"] Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.914479 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:18:30 crc kubenswrapper[4664]: I1013 07:18:30.984918 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" event={"ID":"69834621-9486-4b28-b964-bfb76e7f5a71","Type":"ContainerStarted","Data":"03130b5af06b1d295314c6eb2de719a0916b2bd19e3ee50395fc4d5d5e28b374"} Oct 13 07:18:32 crc kubenswrapper[4664]: I1013 07:18:32.000033 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" event={"ID":"69834621-9486-4b28-b964-bfb76e7f5a71","Type":"ContainerStarted","Data":"200e50f8a63e7ee24168085ff11df8a0eccaafc8d3c389563a809d64e93be187"} Oct 13 07:18:38 crc kubenswrapper[4664]: I1013 07:18:38.046514 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:18:38 crc kubenswrapper[4664]: E1013 07:18:38.047295 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:18:52 crc kubenswrapper[4664]: I1013 07:18:52.047116 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:18:52 crc kubenswrapper[4664]: E1013 
07:18:52.047841 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:19:06 crc kubenswrapper[4664]: I1013 07:19:06.047284 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:19:06 crc kubenswrapper[4664]: E1013 07:19:06.048003 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:19:20 crc kubenswrapper[4664]: I1013 07:19:20.046847 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:19:20 crc kubenswrapper[4664]: E1013 07:19:20.047811 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:19:21 crc kubenswrapper[4664]: I1013 07:19:21.438510 4664 generic.go:334] "Generic (PLEG): container finished" podID="69834621-9486-4b28-b964-bfb76e7f5a71" containerID="200e50f8a63e7ee24168085ff11df8a0eccaafc8d3c389563a809d64e93be187" exitCode=0 Oct 13 07:19:21 crc kubenswrapper[4664]: I1013 07:19:21.438830 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" event={"ID":"69834621-9486-4b28-b964-bfb76e7f5a71","Type":"ContainerDied","Data":"200e50f8a63e7ee24168085ff11df8a0eccaafc8d3c389563a809d64e93be187"} Oct 13 07:19:22 crc kubenswrapper[4664]: I1013 07:19:22.967308 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.121140 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key\") pod \"69834621-9486-4b28-b964-bfb76e7f5a71\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.121185 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clcgv\" (UniqueName: \"kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv\") pod \"69834621-9486-4b28-b964-bfb76e7f5a71\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.121223 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory\") pod \"69834621-9486-4b28-b964-bfb76e7f5a71\" (UID: \"69834621-9486-4b28-b964-bfb76e7f5a71\") " Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.141273 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv" (OuterVolumeSpecName: "kube-api-access-clcgv") pod "69834621-9486-4b28-b964-bfb76e7f5a71" (UID: "69834621-9486-4b28-b964-bfb76e7f5a71"). InnerVolumeSpecName "kube-api-access-clcgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.160218 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "69834621-9486-4b28-b964-bfb76e7f5a71" (UID: "69834621-9486-4b28-b964-bfb76e7f5a71"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.173696 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory" (OuterVolumeSpecName: "inventory") pod "69834621-9486-4b28-b964-bfb76e7f5a71" (UID: "69834621-9486-4b28-b964-bfb76e7f5a71"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.223726 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.223761 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clcgv\" (UniqueName: \"kubernetes.io/projected/69834621-9486-4b28-b964-bfb76e7f5a71-kube-api-access-clcgv\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.223775 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69834621-9486-4b28-b964-bfb76e7f5a71-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.459631 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" event={"ID":"69834621-9486-4b28-b964-bfb76e7f5a71","Type":"ContainerDied","Data":"03130b5af06b1d295314c6eb2de719a0916b2bd19e3ee50395fc4d5d5e28b374"} Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.459674 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03130b5af06b1d295314c6eb2de719a0916b2bd19e3ee50395fc4d5d5e28b374" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.459751 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.561970 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-slrqm"] Oct 13 07:19:23 crc kubenswrapper[4664]: E1013 07:19:23.562558 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69834621-9486-4b28-b964-bfb76e7f5a71" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.562590 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="69834621-9486-4b28-b964-bfb76e7f5a71" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.564055 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="69834621-9486-4b28-b964-bfb76e7f5a71" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.565259 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.569155 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.569568 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.569902 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.571197 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.574354 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-slrqm"] Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.734440 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.734608 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.734688 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p477b\" (UniqueName: \"kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.836022 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.836578 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.836626 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p477b\" (UniqueName: \"kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc 
kubenswrapper[4664]: I1013 07:19:23.841590 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.842437 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.858499 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p477b\" (UniqueName: \"kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b\") pod \"ssh-known-hosts-edpm-deployment-slrqm\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:23 crc kubenswrapper[4664]: I1013 07:19:23.888498 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:24 crc kubenswrapper[4664]: I1013 07:19:24.390346 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-slrqm"] Oct 13 07:19:24 crc kubenswrapper[4664]: I1013 07:19:24.469008 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" event={"ID":"63131591-3029-46be-b656-6e833036fa2d","Type":"ContainerStarted","Data":"837510997a6911670aa159b3e7ffb775ebc6e8a2433fd8ce6d802293476b600e"} Oct 13 07:19:25 crc kubenswrapper[4664]: I1013 07:19:25.486523 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" event={"ID":"63131591-3029-46be-b656-6e833036fa2d","Type":"ContainerStarted","Data":"fbbcf5296b7457ec231dfd31f407e57a439441c255af7b2ac1f44dfd349152ec"} Oct 13 07:19:25 crc kubenswrapper[4664]: I1013 07:19:25.516767 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" podStartSLOduration=2.127972994 podStartE2EDuration="2.516738095s" podCreationTimestamp="2025-10-13 07:19:23 +0000 UTC" firstStartedPulling="2025-10-13 07:19:24.400319745 +0000 UTC m=+1972.087764937" lastFinishedPulling="2025-10-13 07:19:24.789084816 +0000 UTC m=+1972.476530038" observedRunningTime="2025-10-13 07:19:25.510070127 +0000 UTC m=+1973.197515349" watchObservedRunningTime="2025-10-13 07:19:25.516738095 +0000 UTC m=+1973.204183317" Oct 13 07:19:31 crc kubenswrapper[4664]: I1013 07:19:31.047505 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:19:31 crc kubenswrapper[4664]: I1013 07:19:31.561784 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1"} Oct 13 07:19:32 crc kubenswrapper[4664]: I1013 07:19:32.571463 4664 generic.go:334] "Generic (PLEG): container finished" podID="63131591-3029-46be-b656-6e833036fa2d" 
containerID="fbbcf5296b7457ec231dfd31f407e57a439441c255af7b2ac1f44dfd349152ec" exitCode=0 Oct 13 07:19:32 crc kubenswrapper[4664]: I1013 07:19:32.571577 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" event={"ID":"63131591-3029-46be-b656-6e833036fa2d","Type":"ContainerDied","Data":"fbbcf5296b7457ec231dfd31f407e57a439441c255af7b2ac1f44dfd349152ec"} Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:33.999779 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.043438 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p477b\" (UniqueName: \"kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b\") pod \"63131591-3029-46be-b656-6e833036fa2d\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.043685 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0\") pod \"63131591-3029-46be-b656-6e833036fa2d\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.043780 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam\") pod \"63131591-3029-46be-b656-6e833036fa2d\" (UID: \"63131591-3029-46be-b656-6e833036fa2d\") " Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.050745 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b" (OuterVolumeSpecName: "kube-api-access-p477b") pod "63131591-3029-46be-b656-6e833036fa2d" (UID: "63131591-3029-46be-b656-6e833036fa2d"). InnerVolumeSpecName "kube-api-access-p477b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.077739 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "63131591-3029-46be-b656-6e833036fa2d" (UID: "63131591-3029-46be-b656-6e833036fa2d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.093106 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "63131591-3029-46be-b656-6e833036fa2d" (UID: "63131591-3029-46be-b656-6e833036fa2d"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.145826 4664 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-inventory-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.145870 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/63131591-3029-46be-b656-6e833036fa2d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.145885 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p477b\" (UniqueName: \"kubernetes.io/projected/63131591-3029-46be-b656-6e833036fa2d-kube-api-access-p477b\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.591574 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" event={"ID":"63131591-3029-46be-b656-6e833036fa2d","Type":"ContainerDied","Data":"837510997a6911670aa159b3e7ffb775ebc6e8a2433fd8ce6d802293476b600e"} Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.591612 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="837510997a6911670aa159b3e7ffb775ebc6e8a2433fd8ce6d802293476b600e" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.591672 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-slrqm" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.681121 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv"] Oct 13 07:19:34 crc kubenswrapper[4664]: E1013 07:19:34.681532 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63131591-3029-46be-b656-6e833036fa2d" containerName="ssh-known-hosts-edpm-deployment" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.681551 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="63131591-3029-46be-b656-6e833036fa2d" containerName="ssh-known-hosts-edpm-deployment" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.681757 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="63131591-3029-46be-b656-6e833036fa2d" containerName="ssh-known-hosts-edpm-deployment" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.682459 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.690549 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.690811 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.691043 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.691227 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.700921 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv"] Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.759449 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzvwl\" (UniqueName: \"kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.759504 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.759544 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.861739 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzvwl\" (UniqueName: \"kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.861851 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.861896 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.867359 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.867848 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:34 crc kubenswrapper[4664]: I1013 07:19:34.879857 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzvwl\" (UniqueName: \"kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v66dv\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:35 crc kubenswrapper[4664]: I1013 07:19:35.006353 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:35 crc kubenswrapper[4664]: I1013 07:19:35.539033 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv"] Oct 13 07:19:35 crc kubenswrapper[4664]: I1013 07:19:35.604023 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" event={"ID":"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4","Type":"ContainerStarted","Data":"8aca37cdb2bc9741be931f03d5b0e103f97c8e3dc0f452aee5b99317e0269059"} Oct 13 07:19:36 crc kubenswrapper[4664]: I1013 07:19:36.617092 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" event={"ID":"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4","Type":"ContainerStarted","Data":"056eb72827a3764b6eb8e5fa79f885674471bbd63e90031062e3b75893ed233a"} Oct 13 07:19:36 crc kubenswrapper[4664]: I1013 07:19:36.641063 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" podStartSLOduration=2.199939108 podStartE2EDuration="2.64104799s" podCreationTimestamp="2025-10-13 07:19:34 +0000 UTC" firstStartedPulling="2025-10-13 07:19:35.554805316 +0000 UTC m=+1983.242250508" lastFinishedPulling="2025-10-13 07:19:35.995914198 +0000 UTC m=+1983.683359390" observedRunningTime="2025-10-13 07:19:36.636362015 +0000 UTC m=+1984.323807267" watchObservedRunningTime="2025-10-13 07:19:36.64104799 +0000 UTC m=+1984.328493182" Oct 13 07:19:44 crc kubenswrapper[4664]: I1013 07:19:44.703590 4664 generic.go:334] "Generic (PLEG): container finished" podID="50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" containerID="056eb72827a3764b6eb8e5fa79f885674471bbd63e90031062e3b75893ed233a" exitCode=0 Oct 13 07:19:44 crc kubenswrapper[4664]: I1013 07:19:44.703975 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" 
event={"ID":"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4","Type":"ContainerDied","Data":"056eb72827a3764b6eb8e5fa79f885674471bbd63e90031062e3b75893ed233a"} Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.157366 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.302184 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory\") pod \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.302238 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzvwl\" (UniqueName: \"kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl\") pod \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.302382 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key\") pod \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\" (UID: \"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4\") " Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.313570 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl" (OuterVolumeSpecName: "kube-api-access-vzvwl") pod "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" (UID: "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4"). InnerVolumeSpecName "kube-api-access-vzvwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.343267 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" (UID: "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.349878 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory" (OuterVolumeSpecName: "inventory") pod "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" (UID: "50bfac0b-3fc4-47ba-8570-1b048e9d7ca4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.405299 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.405350 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.405370 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzvwl\" (UniqueName: \"kubernetes.io/projected/50bfac0b-3fc4-47ba-8570-1b048e9d7ca4-kube-api-access-vzvwl\") on node \"crc\" DevicePath \"\"" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.723086 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" event={"ID":"50bfac0b-3fc4-47ba-8570-1b048e9d7ca4","Type":"ContainerDied","Data":"8aca37cdb2bc9741be931f03d5b0e103f97c8e3dc0f452aee5b99317e0269059"} Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.723133 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8aca37cdb2bc9741be931f03d5b0e103f97c8e3dc0f452aee5b99317e0269059" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.723148 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v66dv" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.804508 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw"] Oct 13 07:19:46 crc kubenswrapper[4664]: E1013 07:19:46.804950 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.804976 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.805282 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="50bfac0b-3fc4-47ba-8570-1b048e9d7ca4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.806147 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.813059 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.813224 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.813223 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.813411 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.827613 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw"] Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.915762 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.915946 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:46 crc kubenswrapper[4664]: I1013 07:19:46.915985 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q456\" (UniqueName: \"kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.017370 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.017419 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q456\" (UniqueName: \"kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.017572 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: 
\"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.025441 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.041859 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.045470 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q456\" (UniqueName: \"kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.133545 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.684215 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw"] Oct 13 07:19:47 crc kubenswrapper[4664]: W1013 07:19:47.687972 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3098a2cf_d529_4bd7_9b2a_03111b8e6e2c.slice/crio-50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0 WatchSource:0}: Error finding container 50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0: Status 404 returned error can't find the container with id 50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0 Oct 13 07:19:47 crc kubenswrapper[4664]: I1013 07:19:47.732633 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" event={"ID":"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c","Type":"ContainerStarted","Data":"50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0"} Oct 13 07:19:48 crc kubenswrapper[4664]: I1013 07:19:48.743666 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" event={"ID":"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c","Type":"ContainerStarted","Data":"50a8cb2e55421dc0d80f0c0b2bb76727f992f05736d5c96a75bd48765f03c675"} Oct 13 07:19:48 crc kubenswrapper[4664]: I1013 07:19:48.767466 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" podStartSLOduration=2.283858139 podStartE2EDuration="2.767444438s" podCreationTimestamp="2025-10-13 07:19:46 +0000 UTC" firstStartedPulling="2025-10-13 07:19:47.689871356 +0000 UTC m=+1995.377316548" lastFinishedPulling="2025-10-13 07:19:48.173457615 +0000 UTC m=+1995.860902847" observedRunningTime="2025-10-13 07:19:48.757846891 +0000 UTC m=+1996.445292093" 
watchObservedRunningTime="2025-10-13 07:19:48.767444438 +0000 UTC m=+1996.454889630" Oct 13 07:19:58 crc kubenswrapper[4664]: I1013 07:19:58.827294 4664 generic.go:334] "Generic (PLEG): container finished" podID="3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" containerID="50a8cb2e55421dc0d80f0c0b2bb76727f992f05736d5c96a75bd48765f03c675" exitCode=0 Oct 13 07:19:58 crc kubenswrapper[4664]: I1013 07:19:58.827394 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" event={"ID":"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c","Type":"ContainerDied","Data":"50a8cb2e55421dc0d80f0c0b2bb76727f992f05736d5c96a75bd48765f03c675"} Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.291871 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.388168 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory\") pod \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.388250 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q456\" (UniqueName: \"kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456\") pod \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.388312 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key\") pod \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\" (UID: \"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c\") " Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.396071 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456" (OuterVolumeSpecName: "kube-api-access-4q456") pod "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" (UID: "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c"). InnerVolumeSpecName "kube-api-access-4q456". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.428949 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory" (OuterVolumeSpecName: "inventory") pod "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" (UID: "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.432093 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" (UID: "3098a2cf-d529-4bd7-9b2a-03111b8e6e2c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.490915 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.491146 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q456\" (UniqueName: \"kubernetes.io/projected/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-kube-api-access-4q456\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.491347 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3098a2cf-d529-4bd7-9b2a-03111b8e6e2c-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.846816 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" event={"ID":"3098a2cf-d529-4bd7-9b2a-03111b8e6e2c","Type":"ContainerDied","Data":"50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0"} Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.847378 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50e990bd11374b261b7d1106c8c9cf29f3df0fde1687e18daf7a6e49017d2ec0" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.847521 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.944393 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27"] Oct 13 07:20:00 crc kubenswrapper[4664]: E1013 07:20:00.944874 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.944897 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.945121 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3098a2cf-d529-4bd7-9b2a-03111b8e6e2c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.945820 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.948570 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.948931 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.949148 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.949296 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.950483 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.950550 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.950634 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.951172 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Oct 13 07:20:00 crc kubenswrapper[4664]: I1013 07:20:00.973035 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27"] Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.009626 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.009903 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010034 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010268 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010399 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010550 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010670 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010768 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.010953 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.011093 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.011238 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2r65\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.011343 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.011449 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.011579 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.114111 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2r65\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.114528 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.114653 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.114781 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.115001 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.115623 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.115744 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.115947 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116094 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116263 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116379 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116487 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116641 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.116771 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.119360 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.119412 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.119611 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.121835 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.121840 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.123217 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.124300 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.124591 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.125077 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.126850 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.128708 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.129659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.137749 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: 
\"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.143659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2r65\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h4q27\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.265738 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:01 crc kubenswrapper[4664]: I1013 07:20:01.845445 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27"] Oct 13 07:20:02 crc kubenswrapper[4664]: I1013 07:20:02.865155 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" event={"ID":"815f680a-a040-4f4d-acf4-ce60901f32f2","Type":"ContainerStarted","Data":"374d999a9563796b6d82c7c4be4601fae29aed741dd9b762a69721c52f79ce66"} Oct 13 07:20:02 crc kubenswrapper[4664]: I1013 07:20:02.865567 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" event={"ID":"815f680a-a040-4f4d-acf4-ce60901f32f2","Type":"ContainerStarted","Data":"221bcee30355025da48bcf2971443e31e25081e700b38d304c014e305bcafab2"} Oct 13 07:20:42 crc kubenswrapper[4664]: I1013 07:20:42.221623 4664 generic.go:334] "Generic (PLEG): container finished" podID="815f680a-a040-4f4d-acf4-ce60901f32f2" containerID="374d999a9563796b6d82c7c4be4601fae29aed741dd9b762a69721c52f79ce66" exitCode=0 Oct 13 07:20:42 crc kubenswrapper[4664]: I1013 07:20:42.221703 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" event={"ID":"815f680a-a040-4f4d-acf4-ce60901f32f2","Type":"ContainerDied","Data":"374d999a9563796b6d82c7c4be4601fae29aed741dd9b762a69721c52f79ce66"} Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.704216 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785236 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785338 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785393 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785428 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785470 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785506 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785586 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785725 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785776 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc 
kubenswrapper[4664]: I1013 07:20:43.785883 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785927 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2r65\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.785998 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.786053 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.786093 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle\") pod \"815f680a-a040-4f4d-acf4-ce60901f32f2\" (UID: \"815f680a-a040-4f4d-acf4-ce60901f32f2\") " Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.791867 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.792159 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.793974 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.796177 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65" (OuterVolumeSpecName: "kube-api-access-v2r65") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "kube-api-access-v2r65". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.797649 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.799041 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.799481 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.799631 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.803162 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.805700 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.808188 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.809330 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.829563 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory" (OuterVolumeSpecName: "inventory") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.844493 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "815f680a-a040-4f4d-acf4-ce60901f32f2" (UID: "815f680a-a040-4f4d-acf4-ce60901f32f2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888611 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888941 4664 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888953 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888964 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888973 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888983 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.888993 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2r65\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-kube-api-access-v2r65\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889001 4664 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889010 4664 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889021 4664 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889031 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/815f680a-a040-4f4d-acf4-ce60901f32f2-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889040 4664 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889048 4664 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:43 crc kubenswrapper[4664]: I1013 07:20:43.889057 4664 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/815f680a-a040-4f4d-acf4-ce60901f32f2-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.244299 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" event={"ID":"815f680a-a040-4f4d-acf4-ce60901f32f2","Type":"ContainerDied","Data":"221bcee30355025da48bcf2971443e31e25081e700b38d304c014e305bcafab2"} Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.244341 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="221bcee30355025da48bcf2971443e31e25081e700b38d304c014e305bcafab2" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.244377 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h4q27" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.374500 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t"] Oct 13 07:20:44 crc kubenswrapper[4664]: E1013 07:20:44.375014 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="815f680a-a040-4f4d-acf4-ce60901f32f2" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.375030 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="815f680a-a040-4f4d-acf4-ce60901f32f2" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.375229 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="815f680a-a040-4f4d-acf4-ce60901f32f2" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.375951 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.379656 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.379993 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.379909 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.379936 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.380598 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.382078 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t"] Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.502280 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.502335 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.502435 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" 
Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.502468 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.502487 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tfr8\" (UniqueName: \"kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.604310 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.604384 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.604416 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tfr8\" (UniqueName: \"kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.604496 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.604555 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.605612 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.609008 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.609016 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.619972 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tfr8\" (UniqueName: \"kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.620723 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gt52t\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:44 crc kubenswrapper[4664]: I1013 07:20:44.706952 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:20:45 crc kubenswrapper[4664]: I1013 07:20:45.307256 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t"] Oct 13 07:20:46 crc kubenswrapper[4664]: I1013 07:20:46.265826 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" event={"ID":"f741ab28-9784-40cc-a21b-858af4ffb104","Type":"ContainerStarted","Data":"3204ca624edeb84766284be10877b38fd65eafe9fab1d03a76ecdbc55564ad98"} Oct 13 07:20:47 crc kubenswrapper[4664]: I1013 07:20:47.274824 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" event={"ID":"f741ab28-9784-40cc-a21b-858af4ffb104","Type":"ContainerStarted","Data":"08f1ad503b463357473dadd21b0882ec88650a05618bac1a6935bf4404bd4fd9"} Oct 13 07:20:47 crc kubenswrapper[4664]: I1013 07:20:47.297840 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" podStartSLOduration=2.596481773 podStartE2EDuration="3.297823258s" podCreationTimestamp="2025-10-13 07:20:44 +0000 UTC" firstStartedPulling="2025-10-13 07:20:45.330641671 +0000 UTC m=+2053.018086863" lastFinishedPulling="2025-10-13 07:20:46.031983156 +0000 UTC m=+2053.719428348" observedRunningTime="2025-10-13 07:20:47.293316797 +0000 UTC m=+2054.980761999" watchObservedRunningTime="2025-10-13 07:20:47.297823258 +0000 UTC m=+2054.985268460" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.425317 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"] Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.427563 4664 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.452439 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"] Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.524354 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-222b4\" (UniqueName: \"kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.524492 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.524670 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.627064 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-222b4\" (UniqueName: \"kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.627179 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.627243 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.627812 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.627881 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.668712 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-222b4\" (UniqueName: \"kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4\") pod \"redhat-operators-wv6zr\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") " pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:55 crc kubenswrapper[4664]: I1013 07:20:55.750926 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:20:56 crc kubenswrapper[4664]: I1013 07:20:56.269864 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"] Oct 13 07:20:56 crc kubenswrapper[4664]: I1013 07:20:56.356152 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerStarted","Data":"652c4b34255bdc632116927606d165a65df1bcb6882bfe27b8f0ec3027d44250"} Oct 13 07:20:57 crc kubenswrapper[4664]: I1013 07:20:57.366978 4664 generic.go:334] "Generic (PLEG): container finished" podID="869067ba-2703-4fea-b5f4-13710cedc41c" containerID="b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee" exitCode=0 Oct 13 07:20:57 crc kubenswrapper[4664]: I1013 07:20:57.367037 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerDied","Data":"b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee"} Oct 13 07:20:59 crc kubenswrapper[4664]: I1013 07:20:59.391370 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerStarted","Data":"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61"} Oct 13 07:21:03 crc kubenswrapper[4664]: I1013 07:21:03.425030 4664 generic.go:334] "Generic (PLEG): container finished" podID="869067ba-2703-4fea-b5f4-13710cedc41c" containerID="5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61" exitCode=0 Oct 13 07:21:03 crc kubenswrapper[4664]: I1013 07:21:03.425115 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerDied","Data":"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61"} Oct 13 07:21:04 crc kubenswrapper[4664]: I1013 07:21:04.440672 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerStarted","Data":"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b"} Oct 13 07:21:04 crc kubenswrapper[4664]: I1013 07:21:04.457441 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wv6zr" podStartSLOduration=3.003819067 podStartE2EDuration="9.457418079s" podCreationTimestamp="2025-10-13 07:20:55 +0000 UTC" firstStartedPulling="2025-10-13 07:20:57.368527012 +0000 UTC m=+2065.055972204" lastFinishedPulling="2025-10-13 07:21:03.822126034 +0000 UTC m=+2071.509571216" observedRunningTime="2025-10-13 07:21:04.454258234 +0000 UTC m=+2072.141703426" watchObservedRunningTime="2025-10-13 07:21:04.457418079 +0000 UTC m=+2072.144863291" Oct 13 07:21:05 crc kubenswrapper[4664]: I1013 07:21:05.751930 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wv6zr" 
Oct 13 07:21:05 crc kubenswrapper[4664]: I1013 07:21:05.752262 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wv6zr"
Oct 13 07:21:06 crc kubenswrapper[4664]: I1013 07:21:06.796147 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wv6zr" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:21:06 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:21:06 crc kubenswrapper[4664]: >
Oct 13 07:21:16 crc kubenswrapper[4664]: I1013 07:21:16.798588 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wv6zr" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:21:16 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:21:16 crc kubenswrapper[4664]: >
Oct 13 07:21:25 crc kubenswrapper[4664]: I1013 07:21:25.806561 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wv6zr"
Oct 13 07:21:25 crc kubenswrapper[4664]: I1013 07:21:25.858912 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wv6zr"
Oct 13 07:21:26 crc kubenswrapper[4664]: I1013 07:21:26.634138 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"]
Oct 13 07:21:27 crc kubenswrapper[4664]: I1013 07:21:27.645895 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wv6zr" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" containerID="cri-o://b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b" gracePeriod=2
Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.148488 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv6zr"
Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.261080 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-222b4\" (UniqueName: \"kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4\") pod \"869067ba-2703-4fea-b5f4-13710cedc41c\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") "
Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.261158 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content\") pod \"869067ba-2703-4fea-b5f4-13710cedc41c\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") "
Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.261318 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities\") pod \"869067ba-2703-4fea-b5f4-13710cedc41c\" (UID: \"869067ba-2703-4fea-b5f4-13710cedc41c\") "
Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.262620 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities" (OuterVolumeSpecName: "utilities") pod "869067ba-2703-4fea-b5f4-13710cedc41c" (UID: "869067ba-2703-4fea-b5f4-13710cedc41c"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.268425 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4" (OuterVolumeSpecName: "kube-api-access-222b4") pod "869067ba-2703-4fea-b5f4-13710cedc41c" (UID: "869067ba-2703-4fea-b5f4-13710cedc41c"). InnerVolumeSpecName "kube-api-access-222b4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.356948 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "869067ba-2703-4fea-b5f4-13710cedc41c" (UID: "869067ba-2703-4fea-b5f4-13710cedc41c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.364015 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-222b4\" (UniqueName: \"kubernetes.io/projected/869067ba-2703-4fea-b5f4-13710cedc41c-kube-api-access-222b4\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.364047 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.364062 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869067ba-2703-4fea-b5f4-13710cedc41c-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.655528 4664 generic.go:334] "Generic (PLEG): container finished" podID="869067ba-2703-4fea-b5f4-13710cedc41c" containerID="b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b" exitCode=0 Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.655568 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerDied","Data":"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b"} Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.655594 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv6zr" event={"ID":"869067ba-2703-4fea-b5f4-13710cedc41c","Type":"ContainerDied","Data":"652c4b34255bdc632116927606d165a65df1bcb6882bfe27b8f0ec3027d44250"} Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.655611 4664 scope.go:117] "RemoveContainer" containerID="b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.655719 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wv6zr" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.688581 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"] Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.692039 4664 scope.go:117] "RemoveContainer" containerID="5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.697830 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wv6zr"] Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.714850 4664 scope.go:117] "RemoveContainer" containerID="b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.756812 4664 scope.go:117] "RemoveContainer" containerID="b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b" Oct 13 07:21:28 crc kubenswrapper[4664]: E1013 07:21:28.757261 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b\": container with ID starting with b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b not found: ID does not exist" containerID="b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.757299 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b"} err="failed to get container status \"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b\": rpc error: code = NotFound desc = could not find container \"b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b\": container with ID starting with b090f415318dce062725028c874b71f2b3475b48ce235d85218a52620f3d7f9b not found: ID does not exist" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.757324 4664 scope.go:117] "RemoveContainer" containerID="5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61" Oct 13 07:21:28 crc kubenswrapper[4664]: E1013 07:21:28.757632 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61\": container with ID starting with 5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61 not found: ID does not exist" containerID="5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.757662 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61"} err="failed to get container status \"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61\": rpc error: code = NotFound desc = could not find container \"5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61\": container with ID starting with 5dfe48524717c181369a665e7e33b8d52a12b1c943a5485e5ccdf6f79d202a61 not found: ID does not exist" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.757676 4664 scope.go:117] "RemoveContainer" containerID="b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee" Oct 13 07:21:28 crc kubenswrapper[4664]: E1013 07:21:28.758647 4664 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee\": container with ID starting with b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee not found: ID does not exist" containerID="b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee" Oct 13 07:21:28 crc kubenswrapper[4664]: I1013 07:21:28.758724 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee"} err="failed to get container status \"b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee\": rpc error: code = NotFound desc = could not find container \"b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee\": container with ID starting with b7cf57c0dffa14d8d8d3bff9424c27b91018431cf57358406c4390a212e16cee not found: ID does not exist" Oct 13 07:21:29 crc kubenswrapper[4664]: I1013 07:21:29.061959 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" path="/var/lib/kubelet/pods/869067ba-2703-4fea-b5f4-13710cedc41c/volumes" Oct 13 07:21:50 crc kubenswrapper[4664]: I1013 07:21:50.871085 4664 generic.go:334] "Generic (PLEG): container finished" podID="f741ab28-9784-40cc-a21b-858af4ffb104" containerID="08f1ad503b463357473dadd21b0882ec88650a05618bac1a6935bf4404bd4fd9" exitCode=0 Oct 13 07:21:50 crc kubenswrapper[4664]: I1013 07:21:50.871214 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" event={"ID":"f741ab28-9784-40cc-a21b-858af4ffb104","Type":"ContainerDied","Data":"08f1ad503b463357473dadd21b0882ec88650a05618bac1a6935bf4404bd4fd9"} Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.286088 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.426577 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tfr8\" (UniqueName: \"kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8\") pod \"f741ab28-9784-40cc-a21b-858af4ffb104\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.426626 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0\") pod \"f741ab28-9784-40cc-a21b-858af4ffb104\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.426674 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory\") pod \"f741ab28-9784-40cc-a21b-858af4ffb104\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.426866 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key\") pod \"f741ab28-9784-40cc-a21b-858af4ffb104\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.426896 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle\") pod \"f741ab28-9784-40cc-a21b-858af4ffb104\" (UID: \"f741ab28-9784-40cc-a21b-858af4ffb104\") " Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.434437 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "f741ab28-9784-40cc-a21b-858af4ffb104" (UID: "f741ab28-9784-40cc-a21b-858af4ffb104"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.439737 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8" (OuterVolumeSpecName: "kube-api-access-8tfr8") pod "f741ab28-9784-40cc-a21b-858af4ffb104" (UID: "f741ab28-9784-40cc-a21b-858af4ffb104"). InnerVolumeSpecName "kube-api-access-8tfr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.454293 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f741ab28-9784-40cc-a21b-858af4ffb104" (UID: "f741ab28-9784-40cc-a21b-858af4ffb104"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.457304 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory" (OuterVolumeSpecName: "inventory") pod "f741ab28-9784-40cc-a21b-858af4ffb104" (UID: "f741ab28-9784-40cc-a21b-858af4ffb104"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.476583 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "f741ab28-9784-40cc-a21b-858af4ffb104" (UID: "f741ab28-9784-40cc-a21b-858af4ffb104"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.530102 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tfr8\" (UniqueName: \"kubernetes.io/projected/f741ab28-9784-40cc-a21b-858af4ffb104-kube-api-access-8tfr8\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.530152 4664 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f741ab28-9784-40cc-a21b-858af4ffb104-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.530173 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.530188 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.530230 4664 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f741ab28-9784-40cc-a21b-858af4ffb104-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.891751 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" event={"ID":"f741ab28-9784-40cc-a21b-858af4ffb104","Type":"ContainerDied","Data":"3204ca624edeb84766284be10877b38fd65eafe9fab1d03a76ecdbc55564ad98"} Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.891895 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3204ca624edeb84766284be10877b38fd65eafe9fab1d03a76ecdbc55564ad98" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.891851 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gt52t" Oct 13 07:21:52 crc kubenswrapper[4664]: I1013 07:21:52.998893 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44"] Oct 13 07:21:53 crc kubenswrapper[4664]: E1013 07:21:53.000531 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="extract-content" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000557 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="extract-content" Oct 13 07:21:53 crc kubenswrapper[4664]: E1013 07:21:53.000597 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f741ab28-9784-40cc-a21b-858af4ffb104" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000606 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f741ab28-9784-40cc-a21b-858af4ffb104" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 13 07:21:53 crc kubenswrapper[4664]: E1013 07:21:53.000631 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000639 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" Oct 13 07:21:53 crc kubenswrapper[4664]: E1013 07:21:53.000655 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="extract-utilities" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000663 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="extract-utilities" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000875 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="869067ba-2703-4fea-b5f4-13710cedc41c" containerName="registry-server" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.000905 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f741ab28-9784-40cc-a21b-858af4ffb104" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.003444 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.007694 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.008014 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.009018 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.009329 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.009514 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.010491 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.012359 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44"] Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.141864 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.141940 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.142002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.142022 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.142147 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.142391 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmfl8\" (UniqueName: \"kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.243948 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmfl8\" (UniqueName: \"kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.244329 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.245317 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.245560 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.246096 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.246305 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.251433 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.251860 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.252103 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.254975 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.258623 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.262250 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmfl8\" (UniqueName: \"kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.318499 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.838396 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44"] Oct 13 07:21:53 crc kubenswrapper[4664]: I1013 07:21:53.901231 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" event={"ID":"8977ee61-f509-4bc9-bd69-ebefbfdafda9","Type":"ContainerStarted","Data":"7d53dc93ef7b4e1321a92638894822e879022b3614bbf29294afd691b3903088"} Oct 13 07:21:54 crc kubenswrapper[4664]: I1013 07:21:54.916681 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" event={"ID":"8977ee61-f509-4bc9-bd69-ebefbfdafda9","Type":"ContainerStarted","Data":"64ef5c0001ac8234d32f75d32e77028fc302d4473b604d904922a5c34f1fb45f"} Oct 13 07:21:54 crc kubenswrapper[4664]: I1013 07:21:54.948486 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" podStartSLOduration=2.520669677 podStartE2EDuration="2.948452621s" podCreationTimestamp="2025-10-13 07:21:52 +0000 UTC" firstStartedPulling="2025-10-13 07:21:53.841692232 +0000 UTC m=+2121.529137414" lastFinishedPulling="2025-10-13 07:21:54.269475166 +0000 UTC m=+2121.956920358" observedRunningTime="2025-10-13 07:21:54.947595048 +0000 UTC m=+2122.635040250" watchObservedRunningTime="2025-10-13 07:21:54.948452621 +0000 UTC m=+2122.635897863" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.515525 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.518495 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.530641 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.625091 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.625157 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.625190 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rfgz\" (UniqueName: \"kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.726353 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.726427 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.726466 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rfgz\" (UniqueName: \"kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.726880 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.727300 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.748384 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7rfgz\" (UniqueName: \"kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz\") pod \"redhat-marketplace-jswpl\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:56 crc kubenswrapper[4664]: I1013 07:21:56.837342 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:21:57 crc kubenswrapper[4664]: I1013 07:21:57.345788 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:21:57 crc kubenswrapper[4664]: W1013 07:21:57.359350 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c8d23b2_e75d_44eb_981f_237a4bf7f4a0.slice/crio-df29c0bfbc494e863dba35517c54952d6927c991df9576a127492f5ce00fb729 WatchSource:0}: Error finding container df29c0bfbc494e863dba35517c54952d6927c991df9576a127492f5ce00fb729: Status 404 returned error can't find the container with id df29c0bfbc494e863dba35517c54952d6927c991df9576a127492f5ce00fb729 Oct 13 07:21:57 crc kubenswrapper[4664]: I1013 07:21:57.951619 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerID="0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0" exitCode=0 Oct 13 07:21:57 crc kubenswrapper[4664]: I1013 07:21:57.951702 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerDied","Data":"0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0"} Oct 13 07:21:57 crc kubenswrapper[4664]: I1013 07:21:57.951896 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerStarted","Data":"df29c0bfbc494e863dba35517c54952d6927c991df9576a127492f5ce00fb729"} Oct 13 07:21:58 crc kubenswrapper[4664]: I1013 07:21:58.812233 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:21:58 crc kubenswrapper[4664]: I1013 07:21:58.812308 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:21:58 crc kubenswrapper[4664]: I1013 07:21:58.962480 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerStarted","Data":"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55"} Oct 13 07:21:59 crc kubenswrapper[4664]: I1013 07:21:59.974847 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerID="66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55" exitCode=0 Oct 13 07:21:59 crc kubenswrapper[4664]: I1013 07:21:59.974892 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerDied","Data":"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55"} Oct 13 07:22:00 crc kubenswrapper[4664]: I1013 07:22:00.984624 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerStarted","Data":"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8"} Oct 13 07:22:06 crc kubenswrapper[4664]: I1013 07:22:06.839090 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:06 crc kubenswrapper[4664]: I1013 07:22:06.839668 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:06 crc kubenswrapper[4664]: I1013 07:22:06.900881 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:06 crc kubenswrapper[4664]: I1013 07:22:06.936030 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jswpl" podStartSLOduration=8.485139645 podStartE2EDuration="10.935992052s" podCreationTimestamp="2025-10-13 07:21:56 +0000 UTC" firstStartedPulling="2025-10-13 07:21:57.953063269 +0000 UTC m=+2125.640508461" lastFinishedPulling="2025-10-13 07:22:00.403915686 +0000 UTC m=+2128.091360868" observedRunningTime="2025-10-13 07:22:01.006208006 +0000 UTC m=+2128.693653208" watchObservedRunningTime="2025-10-13 07:22:06.935992052 +0000 UTC m=+2134.623437284" Oct 13 07:22:07 crc kubenswrapper[4664]: I1013 07:22:07.101080 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:07 crc kubenswrapper[4664]: I1013 07:22:07.160273 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.056242 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jswpl" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="registry-server" containerID="cri-o://c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8" gracePeriod=2 Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.533278 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.683064 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rfgz\" (UniqueName: \"kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz\") pod \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.683165 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content\") pod \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.683282 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities\") pod \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\" (UID: \"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0\") " Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.684072 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities" (OuterVolumeSpecName: "utilities") pod "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" (UID: "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.690964 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz" (OuterVolumeSpecName: "kube-api-access-7rfgz") pod "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" (UID: "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0"). InnerVolumeSpecName "kube-api-access-7rfgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.694993 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" (UID: "6c8d23b2-e75d-44eb-981f-237a4bf7f4a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.785332 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rfgz\" (UniqueName: \"kubernetes.io/projected/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-kube-api-access-7rfgz\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.785376 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:09 crc kubenswrapper[4664]: I1013 07:22:09.785388 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.091138 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerID="c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8" exitCode=0 Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.092541 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jswpl" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.092569 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerDied","Data":"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8"} Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.093927 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jswpl" event={"ID":"6c8d23b2-e75d-44eb-981f-237a4bf7f4a0","Type":"ContainerDied","Data":"df29c0bfbc494e863dba35517c54952d6927c991df9576a127492f5ce00fb729"} Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.093999 4664 scope.go:117] "RemoveContainer" containerID="c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.134089 4664 scope.go:117] "RemoveContainer" containerID="66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.161856 4664 scope.go:117] "RemoveContainer" containerID="0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.172504 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.187292 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jswpl"] Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.219546 4664 scope.go:117] "RemoveContainer" containerID="c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8" Oct 13 07:22:10 crc kubenswrapper[4664]: E1013 07:22:10.228531 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8\": container with ID starting with c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8 not found: ID does not exist" containerID="c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.228591 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8"} err="failed to get container status \"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8\": rpc error: code = NotFound desc = could not find container \"c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8\": container with ID starting with c8c1ab5d31269ec39354b66e32ddee6b8df114184eec73632321489c917176a8 not found: ID does not exist" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.228624 4664 scope.go:117] "RemoveContainer" containerID="66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55" Oct 13 07:22:10 crc kubenswrapper[4664]: E1013 07:22:10.229347 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55\": container with ID starting with 66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55 not found: ID does not exist" containerID="66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.229401 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55"} err="failed to get container status \"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55\": rpc error: code = NotFound desc = could not find container \"66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55\": container with ID starting with 66035f8488e6dfb960da4fb9fcca798596d1376b7e439fe21bd81ce133c13a55 not found: ID does not exist" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.229429 4664 scope.go:117] "RemoveContainer" containerID="0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0" Oct 13 07:22:10 crc kubenswrapper[4664]: E1013 07:22:10.236676 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0\": container with ID starting with 0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0 not found: ID does not exist" containerID="0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0" Oct 13 07:22:10 crc kubenswrapper[4664]: I1013 07:22:10.236735 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0"} err="failed to get container status \"0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0\": rpc error: code = NotFound desc = could not find container \"0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0\": container with ID starting with 0427655fec3e7a306c493f9e939fd7ade0c3041e6ecdd02bae94b2937b68a6d0 not found: ID does not exist" Oct 13 07:22:11 crc kubenswrapper[4664]: I1013 07:22:11.059046 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" path="/var/lib/kubelet/pods/6c8d23b2-e75d-44eb-981f-237a4bf7f4a0/volumes" Oct 13 07:22:28 crc kubenswrapper[4664]: I1013 07:22:28.812777 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:22:28 crc kubenswrapper[4664]: I1013 07:22:28.813297 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:22:42 crc kubenswrapper[4664]: I1013 07:22:42.431866 4664 generic.go:334] "Generic (PLEG): container finished" podID="8977ee61-f509-4bc9-bd69-ebefbfdafda9" containerID="64ef5c0001ac8234d32f75d32e77028fc302d4473b604d904922a5c34f1fb45f" exitCode=0 Oct 13 07:22:42 crc kubenswrapper[4664]: I1013 07:22:42.431976 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" event={"ID":"8977ee61-f509-4bc9-bd69-ebefbfdafda9","Type":"ContainerDied","Data":"64ef5c0001ac8234d32f75d32e77028fc302d4473b604d904922a5c34f1fb45f"} Oct 13 07:22:43 crc kubenswrapper[4664]: I1013 07:22:43.898142 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.063691 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.064001 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.064094 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmfl8\" (UniqueName: \"kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.064201 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.064227 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.064252 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0\") pod \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\" (UID: \"8977ee61-f509-4bc9-bd69-ebefbfdafda9\") " Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 
07:22:44.070636 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8" (OuterVolumeSpecName: "kube-api-access-cmfl8") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "kube-api-access-cmfl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.071156 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.110394 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.117651 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.117671 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.123962 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory" (OuterVolumeSpecName: "inventory") pod "8977ee61-f509-4bc9-bd69-ebefbfdafda9" (UID: "8977ee61-f509-4bc9-bd69-ebefbfdafda9"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166030 4664 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166078 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166095 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmfl8\" (UniqueName: \"kubernetes.io/projected/8977ee61-f509-4bc9-bd69-ebefbfdafda9-kube-api-access-cmfl8\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166109 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166126 4664 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.166140 4664 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8977ee61-f509-4bc9-bd69-ebefbfdafda9-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.468835 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" event={"ID":"8977ee61-f509-4bc9-bd69-ebefbfdafda9","Type":"ContainerDied","Data":"7d53dc93ef7b4e1321a92638894822e879022b3614bbf29294afd691b3903088"} Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.468894 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d53dc93ef7b4e1321a92638894822e879022b3614bbf29294afd691b3903088" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.468969 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.566497 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs"] Oct 13 07:22:44 crc kubenswrapper[4664]: E1013 07:22:44.566984 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="extract-utilities" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567002 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="extract-utilities" Oct 13 07:22:44 crc kubenswrapper[4664]: E1013 07:22:44.567042 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="registry-server" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567048 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="registry-server" Oct 13 07:22:44 crc kubenswrapper[4664]: E1013 07:22:44.567078 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="extract-content" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567088 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="extract-content" Oct 13 07:22:44 crc kubenswrapper[4664]: E1013 07:22:44.567103 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8977ee61-f509-4bc9-bd69-ebefbfdafda9" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567112 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="8977ee61-f509-4bc9-bd69-ebefbfdafda9" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567349 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="8977ee61-f509-4bc9-bd69-ebefbfdafda9" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.567372 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c8d23b2-e75d-44eb-981f-237a4bf7f4a0" containerName="registry-server" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.568173 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.571860 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.572194 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.572355 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.572495 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.572657 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.588041 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs"] Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.676215 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.676352 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4znv9\" (UniqueName: \"kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.676418 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.676528 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.676577 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.778156 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.779058 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4znv9\" (UniqueName: \"kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.779123 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.779203 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.779233 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.785321 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.801460 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.817996 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.825615 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.830591 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4znv9\" (UniqueName: \"kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:44 crc kubenswrapper[4664]: I1013 07:22:44.905415 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:22:45 crc kubenswrapper[4664]: I1013 07:22:45.467884 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs"] Oct 13 07:22:46 crc kubenswrapper[4664]: I1013 07:22:46.491448 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" event={"ID":"0f92f225-d9b3-4d85-967e-b878358e05e4","Type":"ContainerStarted","Data":"298c9168d244b548151678aa35cd8a863f0ab29cd457ca74f4105858c6814d8b"} Oct 13 07:22:46 crc kubenswrapper[4664]: I1013 07:22:46.491679 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" event={"ID":"0f92f225-d9b3-4d85-967e-b878358e05e4","Type":"ContainerStarted","Data":"40c93b9277c4ca318c7ce571f1b072f7f46fbc92ac19905b4323da1f90e203da"} Oct 13 07:22:46 crc kubenswrapper[4664]: I1013 07:22:46.511655 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" podStartSLOduration=2.064479413 podStartE2EDuration="2.511638165s" podCreationTimestamp="2025-10-13 07:22:44 +0000 UTC" firstStartedPulling="2025-10-13 07:22:45.483611406 +0000 UTC m=+2173.171056598" lastFinishedPulling="2025-10-13 07:22:45.930770148 +0000 UTC m=+2173.618215350" observedRunningTime="2025-10-13 07:22:46.50400673 +0000 UTC m=+2174.191451922" watchObservedRunningTime="2025-10-13 07:22:46.511638165 +0000 UTC m=+2174.199083357" Oct 13 07:22:58 crc kubenswrapper[4664]: I1013 07:22:58.812282 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:22:58 crc kubenswrapper[4664]: I1013 07:22:58.813038 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:22:58 crc kubenswrapper[4664]: I1013 07:22:58.813099 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:22:58 crc kubenswrapper[4664]: I1013 07:22:58.814039 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1"} 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:22:58 crc kubenswrapper[4664]: I1013 07:22:58.814114 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1" gracePeriod=600 Oct 13 07:22:59 crc kubenswrapper[4664]: I1013 07:22:59.624552 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1" exitCode=0 Oct 13 07:22:59 crc kubenswrapper[4664]: I1013 07:22:59.624689 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1"} Oct 13 07:22:59 crc kubenswrapper[4664]: I1013 07:22:59.625048 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c"} Oct 13 07:22:59 crc kubenswrapper[4664]: I1013 07:22:59.625076 4664 scope.go:117] "RemoveContainer" containerID="b38a7140ff43217165aff12a10f0d3767fd2b0fb0462fbdbb6bf7cbc77c6add5" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.354107 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.358346 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.374099 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.526476 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.526527 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.526592 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.628825 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.628880 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.628960 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.629726 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.629976 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.671238 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d\") pod \"certified-operators-6rrvj\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:29 crc kubenswrapper[4664]: I1013 07:24:29.690050 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:30 crc kubenswrapper[4664]: I1013 07:24:30.077551 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:30 crc kubenswrapper[4664]: I1013 07:24:30.518042 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerDied","Data":"26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944"} Oct 13 07:24:30 crc kubenswrapper[4664]: I1013 07:24:30.517888 4664 generic.go:334] "Generic (PLEG): container finished" podID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerID="26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944" exitCode=0 Oct 13 07:24:30 crc kubenswrapper[4664]: I1013 07:24:30.518691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerStarted","Data":"2fb7f6eddc393cd8c1c192f470bc214661017501c39bfe45ac986ab83467eb34"} Oct 13 07:24:30 crc kubenswrapper[4664]: I1013 07:24:30.525721 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:24:31 crc kubenswrapper[4664]: I1013 07:24:31.528284 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerStarted","Data":"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5"} Oct 13 07:24:33 crc kubenswrapper[4664]: I1013 07:24:33.544279 4664 generic.go:334] "Generic (PLEG): container finished" podID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerID="02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5" exitCode=0 Oct 13 07:24:33 crc kubenswrapper[4664]: I1013 07:24:33.544351 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerDied","Data":"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5"} Oct 13 07:24:34 crc kubenswrapper[4664]: I1013 07:24:34.562233 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerStarted","Data":"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348"} Oct 13 07:24:34 crc kubenswrapper[4664]: I1013 07:24:34.581031 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6rrvj" podStartSLOduration=2.150257233 podStartE2EDuration="5.581010803s" podCreationTimestamp="2025-10-13 07:24:29 +0000 UTC" firstStartedPulling="2025-10-13 07:24:30.523198514 +0000 UTC m=+2278.210643726" lastFinishedPulling="2025-10-13 07:24:33.953952104 +0000 UTC m=+2281.641397296" observedRunningTime="2025-10-13 07:24:34.577768225 +0000 UTC m=+2282.265213427" watchObservedRunningTime="2025-10-13 
07:24:34.581010803 +0000 UTC m=+2282.268456015" Oct 13 07:24:39 crc kubenswrapper[4664]: I1013 07:24:39.691693 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:39 crc kubenswrapper[4664]: I1013 07:24:39.692231 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:39 crc kubenswrapper[4664]: I1013 07:24:39.740248 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:40 crc kubenswrapper[4664]: I1013 07:24:40.669242 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:40 crc kubenswrapper[4664]: I1013 07:24:40.727869 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:42 crc kubenswrapper[4664]: I1013 07:24:42.640652 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6rrvj" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="registry-server" containerID="cri-o://b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348" gracePeriod=2 Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.277340 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.399121 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content\") pod \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.399189 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities\") pod \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.399271 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d\") pod \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\" (UID: \"d74b6cf7-5c5f-42b5-83be-e9301acb627b\") " Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.400295 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities" (OuterVolumeSpecName: "utilities") pod "d74b6cf7-5c5f-42b5-83be-e9301acb627b" (UID: "d74b6cf7-5c5f-42b5-83be-e9301acb627b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.405756 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d" (OuterVolumeSpecName: "kube-api-access-pw77d") pod "d74b6cf7-5c5f-42b5-83be-e9301acb627b" (UID: "d74b6cf7-5c5f-42b5-83be-e9301acb627b"). InnerVolumeSpecName "kube-api-access-pw77d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.453320 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d74b6cf7-5c5f-42b5-83be-e9301acb627b" (UID: "d74b6cf7-5c5f-42b5-83be-e9301acb627b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.501048 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.501075 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d74b6cf7-5c5f-42b5-83be-e9301acb627b-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.501085 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/d74b6cf7-5c5f-42b5-83be-e9301acb627b-kube-api-access-pw77d\") on node \"crc\" DevicePath \"\"" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.657708 4664 generic.go:334] "Generic (PLEG): container finished" podID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerID="b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348" exitCode=0 Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.657759 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerDied","Data":"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348"} Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.657832 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rrvj" event={"ID":"d74b6cf7-5c5f-42b5-83be-e9301acb627b","Type":"ContainerDied","Data":"2fb7f6eddc393cd8c1c192f470bc214661017501c39bfe45ac986ab83467eb34"} Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.657856 4664 scope.go:117] "RemoveContainer" containerID="b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.658020 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6rrvj" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.696562 4664 scope.go:117] "RemoveContainer" containerID="02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.697926 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.718327 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6rrvj"] Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.720430 4664 scope.go:117] "RemoveContainer" containerID="26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.775414 4664 scope.go:117] "RemoveContainer" containerID="b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348" Oct 13 07:24:43 crc kubenswrapper[4664]: E1013 07:24:43.775981 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348\": container with ID starting with b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348 not found: ID does not exist" containerID="b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.776013 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348"} err="failed to get container status \"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348\": rpc error: code = NotFound desc = could not find container \"b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348\": container with ID starting with b7f93d9b68aa7aa74eed2897a112529d6273232c7496045a40fbfc3b3a276348 not found: ID does not exist" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.776040 4664 scope.go:117] "RemoveContainer" containerID="02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5" Oct 13 07:24:43 crc kubenswrapper[4664]: E1013 07:24:43.776370 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5\": container with ID starting with 02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5 not found: ID does not exist" containerID="02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.776412 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5"} err="failed to get container status \"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5\": rpc error: code = NotFound desc = could not find container \"02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5\": container with ID starting with 02959ad0e73a2886da6d5e28f713803da58d4de1d957258dae2836e790562fb5 not found: ID does not exist" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.776442 4664 scope.go:117] "RemoveContainer" containerID="26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944" Oct 13 07:24:43 crc kubenswrapper[4664]: E1013 07:24:43.776732 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944\": container with ID starting with 26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944 not found: ID does not exist" containerID="26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944" Oct 13 07:24:43 crc kubenswrapper[4664]: I1013 07:24:43.776757 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944"} err="failed to get container status \"26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944\": rpc error: code = NotFound desc = could not find container \"26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944\": container with ID starting with 26a7e03cbfdbb3c318c5a0692e3449e09e5561590719e85923c0f2e9bd650944 not found: ID does not exist" Oct 13 07:24:45 crc kubenswrapper[4664]: I1013 07:24:45.065299 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" path="/var/lib/kubelet/pods/d74b6cf7-5c5f-42b5-83be-e9301acb627b/volumes" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.800228 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:18 crc kubenswrapper[4664]: E1013 07:25:18.803328 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="registry-server" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.803385 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="registry-server" Oct 13 07:25:18 crc kubenswrapper[4664]: E1013 07:25:18.803418 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="extract-content" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.803425 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="extract-content" Oct 13 07:25:18 crc kubenswrapper[4664]: E1013 07:25:18.803451 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="extract-utilities" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.803457 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="extract-utilities" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.803674 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="d74b6cf7-5c5f-42b5-83be-e9301acb627b" containerName="registry-server" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.805200 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.813412 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.895248 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.895357 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.895410 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vh2c\" (UniqueName: \"kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.997324 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.997439 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.997495 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vh2c\" (UniqueName: \"kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.998006 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:18 crc kubenswrapper[4664]: I1013 07:25:18.998028 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.029097 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6vh2c\" (UniqueName: \"kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c\") pod \"community-operators-rhdw2\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.131313 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.700182 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.981780 4664 generic.go:334] "Generic (PLEG): container finished" podID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerID="ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5" exitCode=0 Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.981890 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerDied","Data":"ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5"} Oct 13 07:25:19 crc kubenswrapper[4664]: I1013 07:25:19.982825 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerStarted","Data":"a9ede4310f9f1c2a031cd3e0122b4868e46d4c231ccf5881765c1214eee4396b"} Oct 13 07:25:20 crc kubenswrapper[4664]: I1013 07:25:20.996544 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerStarted","Data":"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff"} Oct 13 07:25:23 crc kubenswrapper[4664]: I1013 07:25:23.047133 4664 generic.go:334] "Generic (PLEG): container finished" podID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerID="fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff" exitCode=0 Oct 13 07:25:23 crc kubenswrapper[4664]: I1013 07:25:23.061933 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerDied","Data":"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff"} Oct 13 07:25:24 crc kubenswrapper[4664]: I1013 07:25:24.057925 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerStarted","Data":"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f"} Oct 13 07:25:24 crc kubenswrapper[4664]: I1013 07:25:24.079904 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rhdw2" podStartSLOduration=2.4836401869999998 podStartE2EDuration="6.079885188s" podCreationTimestamp="2025-10-13 07:25:18 +0000 UTC" firstStartedPulling="2025-10-13 07:25:19.984005288 +0000 UTC m=+2327.671450510" lastFinishedPulling="2025-10-13 07:25:23.580250319 +0000 UTC m=+2331.267695511" observedRunningTime="2025-10-13 07:25:24.079141628 +0000 UTC m=+2331.766586820" watchObservedRunningTime="2025-10-13 07:25:24.079885188 +0000 UTC m=+2331.767330390" Oct 13 07:25:28 crc kubenswrapper[4664]: I1013 07:25:28.812018 4664 patch_prober.go:28] interesting 
pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:25:28 crc kubenswrapper[4664]: I1013 07:25:28.813598 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:25:29 crc kubenswrapper[4664]: I1013 07:25:29.131927 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:29 crc kubenswrapper[4664]: I1013 07:25:29.132768 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:29 crc kubenswrapper[4664]: I1013 07:25:29.202085 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:30 crc kubenswrapper[4664]: I1013 07:25:30.154935 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:30 crc kubenswrapper[4664]: I1013 07:25:30.203903 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.124760 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rhdw2" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="registry-server" containerID="cri-o://1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f" gracePeriod=2 Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.578101 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.757585 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vh2c\" (UniqueName: \"kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c\") pod \"19534c1c-5299-49f3-8ab5-e272a4f45d37\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.758146 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content\") pod \"19534c1c-5299-49f3-8ab5-e272a4f45d37\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.758585 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities\") pod \"19534c1c-5299-49f3-8ab5-e272a4f45d37\" (UID: \"19534c1c-5299-49f3-8ab5-e272a4f45d37\") " Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.759969 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities" (OuterVolumeSpecName: "utilities") pod "19534c1c-5299-49f3-8ab5-e272a4f45d37" (UID: "19534c1c-5299-49f3-8ab5-e272a4f45d37"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.760659 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.765585 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c" (OuterVolumeSpecName: "kube-api-access-6vh2c") pod "19534c1c-5299-49f3-8ab5-e272a4f45d37" (UID: "19534c1c-5299-49f3-8ab5-e272a4f45d37"). InnerVolumeSpecName "kube-api-access-6vh2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.816366 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19534c1c-5299-49f3-8ab5-e272a4f45d37" (UID: "19534c1c-5299-49f3-8ab5-e272a4f45d37"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.863362 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vh2c\" (UniqueName: \"kubernetes.io/projected/19534c1c-5299-49f3-8ab5-e272a4f45d37-kube-api-access-6vh2c\") on node \"crc\" DevicePath \"\"" Oct 13 07:25:32 crc kubenswrapper[4664]: I1013 07:25:32.863450 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19534c1c-5299-49f3-8ab5-e272a4f45d37-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.140280 4664 generic.go:334] "Generic (PLEG): container finished" podID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerID="1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f" exitCode=0 Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.140322 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerDied","Data":"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f"} Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.140329 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rhdw2" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.140345 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rhdw2" event={"ID":"19534c1c-5299-49f3-8ab5-e272a4f45d37","Type":"ContainerDied","Data":"a9ede4310f9f1c2a031cd3e0122b4868e46d4c231ccf5881765c1214eee4396b"} Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.140359 4664 scope.go:117] "RemoveContainer" containerID="1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.170033 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.174434 4664 scope.go:117] "RemoveContainer" containerID="fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.175083 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rhdw2"] Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.212756 4664 scope.go:117] "RemoveContainer" containerID="ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.252608 4664 scope.go:117] "RemoveContainer" containerID="1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f" Oct 13 07:25:33 crc kubenswrapper[4664]: E1013 07:25:33.253279 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f\": container with ID starting with 1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f not found: ID does not exist" containerID="1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.253318 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f"} err="failed to get container status 
\"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f\": rpc error: code = NotFound desc = could not find container \"1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f\": container with ID starting with 1115eacb7bbb58ea7a4cd4bccc95c7671fc17103a6c58649d89aa30eb142b01f not found: ID does not exist" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.253345 4664 scope.go:117] "RemoveContainer" containerID="fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff" Oct 13 07:25:33 crc kubenswrapper[4664]: E1013 07:25:33.253811 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff\": container with ID starting with fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff not found: ID does not exist" containerID="fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.253837 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff"} err="failed to get container status \"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff\": rpc error: code = NotFound desc = could not find container \"fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff\": container with ID starting with fd451c30c208586625077442f0ca59b52b7a681be4ddac4d5ad08345ce6c24ff not found: ID does not exist" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.253850 4664 scope.go:117] "RemoveContainer" containerID="ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5" Oct 13 07:25:33 crc kubenswrapper[4664]: E1013 07:25:33.254262 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5\": container with ID starting with ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5 not found: ID does not exist" containerID="ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5" Oct 13 07:25:33 crc kubenswrapper[4664]: I1013 07:25:33.254281 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5"} err="failed to get container status \"ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5\": rpc error: code = NotFound desc = could not find container \"ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5\": container with ID starting with ef431aafdf1d3c042f92b64ec964d259016a6c1be845d01d9c62a0226c1505d5 not found: ID does not exist" Oct 13 07:25:35 crc kubenswrapper[4664]: I1013 07:25:35.059469 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" path="/var/lib/kubelet/pods/19534c1c-5299-49f3-8ab5-e272a4f45d37/volumes" Oct 13 07:25:58 crc kubenswrapper[4664]: I1013 07:25:58.812305 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:25:58 crc kubenswrapper[4664]: I1013 07:25:58.812817 4664 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:26:28 crc kubenswrapper[4664]: I1013 07:26:28.812250 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:26:28 crc kubenswrapper[4664]: I1013 07:26:28.812678 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:26:28 crc kubenswrapper[4664]: I1013 07:26:28.812722 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:26:28 crc kubenswrapper[4664]: I1013 07:26:28.813195 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:26:28 crc kubenswrapper[4664]: I1013 07:26:28.813244 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" gracePeriod=600 Oct 13 07:26:28 crc kubenswrapper[4664]: E1013 07:26:28.932436 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:26:29 crc kubenswrapper[4664]: I1013 07:26:29.784020 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" exitCode=0 Oct 13 07:26:29 crc kubenswrapper[4664]: I1013 07:26:29.784077 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c"} Oct 13 07:26:29 crc kubenswrapper[4664]: I1013 07:26:29.784183 4664 scope.go:117] "RemoveContainer" containerID="938daf42a7410fde17245b603de7559e885fae294e70ce56c0b6d246481b84f1" Oct 13 07:26:29 crc kubenswrapper[4664]: I1013 07:26:29.788146 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:26:29 crc kubenswrapper[4664]: E1013 
07:26:29.790388 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:26:42 crc kubenswrapper[4664]: I1013 07:26:42.047020 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:26:42 crc kubenswrapper[4664]: E1013 07:26:42.048919 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:26:53 crc kubenswrapper[4664]: I1013 07:26:53.053077 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:26:53 crc kubenswrapper[4664]: E1013 07:26:53.054006 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:27:03 crc kubenswrapper[4664]: I1013 07:27:03.122422 4664 generic.go:334] "Generic (PLEG): container finished" podID="0f92f225-d9b3-4d85-967e-b878358e05e4" containerID="298c9168d244b548151678aa35cd8a863f0ab29cd457ca74f4105858c6814d8b" exitCode=0 Oct 13 07:27:03 crc kubenswrapper[4664]: I1013 07:27:03.122691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" event={"ID":"0f92f225-d9b3-4d85-967e-b878358e05e4","Type":"ContainerDied","Data":"298c9168d244b548151678aa35cd8a863f0ab29cd457ca74f4105858c6814d8b"} Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.047723 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:27:04 crc kubenswrapper[4664]: E1013 07:27:04.048059 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.556849 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.687386 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0\") pod \"0f92f225-d9b3-4d85-967e-b878358e05e4\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.687491 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle\") pod \"0f92f225-d9b3-4d85-967e-b878358e05e4\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.687535 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory\") pod \"0f92f225-d9b3-4d85-967e-b878358e05e4\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.687654 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key\") pod \"0f92f225-d9b3-4d85-967e-b878358e05e4\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.687733 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4znv9\" (UniqueName: \"kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9\") pod \"0f92f225-d9b3-4d85-967e-b878358e05e4\" (UID: \"0f92f225-d9b3-4d85-967e-b878358e05e4\") " Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.694729 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "0f92f225-d9b3-4d85-967e-b878358e05e4" (UID: "0f92f225-d9b3-4d85-967e-b878358e05e4"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.694980 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9" (OuterVolumeSpecName: "kube-api-access-4znv9") pod "0f92f225-d9b3-4d85-967e-b878358e05e4" (UID: "0f92f225-d9b3-4d85-967e-b878358e05e4"). InnerVolumeSpecName "kube-api-access-4znv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.724942 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0f92f225-d9b3-4d85-967e-b878358e05e4" (UID: "0f92f225-d9b3-4d85-967e-b878358e05e4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.725916 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory" (OuterVolumeSpecName: "inventory") pod "0f92f225-d9b3-4d85-967e-b878358e05e4" (UID: "0f92f225-d9b3-4d85-967e-b878358e05e4"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.731231 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "0f92f225-d9b3-4d85-967e-b878358e05e4" (UID: "0f92f225-d9b3-4d85-967e-b878358e05e4"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.790248 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.790299 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4znv9\" (UniqueName: \"kubernetes.io/projected/0f92f225-d9b3-4d85-967e-b878358e05e4-kube-api-access-4znv9\") on node \"crc\" DevicePath \"\"" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.790315 4664 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.790328 4664 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:27:04 crc kubenswrapper[4664]: I1013 07:27:04.790340 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f92f225-d9b3-4d85-967e-b878358e05e4-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.144937 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" event={"ID":"0f92f225-d9b3-4d85-967e-b878358e05e4","Type":"ContainerDied","Data":"40c93b9277c4ca318c7ce571f1b072f7f46fbc92ac19905b4323da1f90e203da"} Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.145366 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40c93b9277c4ca318c7ce571f1b072f7f46fbc92ac19905b4323da1f90e203da" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.145081 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.272612 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p"] Oct 13 07:27:05 crc kubenswrapper[4664]: E1013 07:27:05.273868 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="registry-server" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.273892 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="registry-server" Oct 13 07:27:05 crc kubenswrapper[4664]: E1013 07:27:05.273905 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f92f225-d9b3-4d85-967e-b878358e05e4" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.273986 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f92f225-d9b3-4d85-967e-b878358e05e4" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 13 07:27:05 crc kubenswrapper[4664]: E1013 07:27:05.274003 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="extract-content" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.274012 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="extract-content" Oct 13 07:27:05 crc kubenswrapper[4664]: E1013 07:27:05.274038 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="extract-utilities" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.274044 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="extract-utilities" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.274245 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f92f225-d9b3-4d85-967e-b878358e05e4" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.274270 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="19534c1c-5299-49f3-8ab5-e272a4f45d37" containerName="registry-server" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.274897 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.277212 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.278307 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.278353 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.278652 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.278656 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.281103 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.281315 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.292452 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p"] Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410283 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410328 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410409 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410446 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410464 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410490 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410547 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410612 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.410690 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbt26\" (UniqueName: \"kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.511844 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.513721 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbt26\" (UniqueName: \"kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.513863 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.513899 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.513954 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.514075 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.514102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.514132 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.514228 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.514771 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.520007 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.522367 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.522767 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.523203 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.523942 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.524660 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.527506 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.547016 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbt26\" (UniqueName: \"kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26\") pod \"nova-edpm-deployment-openstack-edpm-ipam-qg68p\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:05 crc kubenswrapper[4664]: I1013 07:27:05.614891 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:27:06 crc kubenswrapper[4664]: I1013 07:27:06.197603 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p"] Oct 13 07:27:07 crc kubenswrapper[4664]: I1013 07:27:07.163622 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" event={"ID":"85584bbe-e8c0-4359-a828-731d3bc2cd5f","Type":"ContainerStarted","Data":"7f110547e037d634a6f8aaf13cf29588e489f43b1b407a07efdad0636d3b9713"} Oct 13 07:27:07 crc kubenswrapper[4664]: I1013 07:27:07.164127 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" event={"ID":"85584bbe-e8c0-4359-a828-731d3bc2cd5f","Type":"ContainerStarted","Data":"fd6d32c1abc2cdb6d77d31e424fb01385efd735e432455f06df4b560773d939b"} Oct 13 07:27:07 crc kubenswrapper[4664]: I1013 07:27:07.182560 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" podStartSLOduration=1.636747468 podStartE2EDuration="2.182540596s" podCreationTimestamp="2025-10-13 07:27:05 +0000 UTC" firstStartedPulling="2025-10-13 07:27:06.198751077 +0000 UTC m=+2433.886196269" lastFinishedPulling="2025-10-13 07:27:06.744544205 +0000 UTC m=+2434.431989397" observedRunningTime="2025-10-13 07:27:07.178469236 +0000 UTC m=+2434.865914448" watchObservedRunningTime="2025-10-13 07:27:07.182540596 +0000 UTC m=+2434.869985788" Oct 13 07:27:19 crc kubenswrapper[4664]: I1013 07:27:19.047583 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:27:19 crc kubenswrapper[4664]: E1013 07:27:19.048509 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:27:34 crc kubenswrapper[4664]: I1013 07:27:34.047454 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:27:34 crc kubenswrapper[4664]: E1013 07:27:34.048298 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:27:45 crc kubenswrapper[4664]: I1013 07:27:45.047068 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:27:45 crc kubenswrapper[4664]: E1013 07:27:45.047961 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" 
podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:27:58 crc kubenswrapper[4664]: I1013 07:27:58.047048 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:27:58 crc kubenswrapper[4664]: E1013 07:27:58.047760 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:28:12 crc kubenswrapper[4664]: I1013 07:28:12.046917 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:28:12 crc kubenswrapper[4664]: E1013 07:28:12.047785 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:28:26 crc kubenswrapper[4664]: I1013 07:28:26.047705 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:28:26 crc kubenswrapper[4664]: E1013 07:28:26.048645 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:28:40 crc kubenswrapper[4664]: I1013 07:28:40.047416 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:28:40 crc kubenswrapper[4664]: E1013 07:28:40.048579 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:28:55 crc kubenswrapper[4664]: I1013 07:28:55.047504 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:28:55 crc kubenswrapper[4664]: E1013 07:28:55.048363 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:29:08 crc kubenswrapper[4664]: I1013 07:29:08.047594 4664 scope.go:117] "RemoveContainer" 
containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:29:08 crc kubenswrapper[4664]: E1013 07:29:08.048532 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:29:20 crc kubenswrapper[4664]: I1013 07:29:20.047877 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:29:20 crc kubenswrapper[4664]: E1013 07:29:20.048645 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:29:34 crc kubenswrapper[4664]: I1013 07:29:34.047072 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:29:34 crc kubenswrapper[4664]: E1013 07:29:34.047741 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:29:46 crc kubenswrapper[4664]: I1013 07:29:46.046523 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:29:46 crc kubenswrapper[4664]: E1013 07:29:46.047170 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.144728 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb"] Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.148130 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.150263 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.150706 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.164157 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb"] Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.295463 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.295598 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.295859 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22sdl\" (UniqueName: \"kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.398370 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.398505 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.398663 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22sdl\" (UniqueName: \"kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.400444 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume\") pod 
\"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.411681 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.427623 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22sdl\" (UniqueName: \"kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl\") pod \"collect-profiles-29339010-mkrsb\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.473659 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:00 crc kubenswrapper[4664]: I1013 07:30:00.981723 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb"] Oct 13 07:30:01 crc kubenswrapper[4664]: I1013 07:30:01.046928 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:30:01 crc kubenswrapper[4664]: E1013 07:30:01.047162 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:30:01 crc kubenswrapper[4664]: I1013 07:30:01.911073 4664 generic.go:334] "Generic (PLEG): container finished" podID="5f363fe6-c902-4860-a3d9-4e1d39803e17" containerID="89e0305340ea2ea3fa70058fd26333b323f2f38aac8903c75ff514000f2df269" exitCode=0 Oct 13 07:30:01 crc kubenswrapper[4664]: I1013 07:30:01.911433 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" event={"ID":"5f363fe6-c902-4860-a3d9-4e1d39803e17","Type":"ContainerDied","Data":"89e0305340ea2ea3fa70058fd26333b323f2f38aac8903c75ff514000f2df269"} Oct 13 07:30:01 crc kubenswrapper[4664]: I1013 07:30:01.911468 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" event={"ID":"5f363fe6-c902-4860-a3d9-4e1d39803e17","Type":"ContainerStarted","Data":"82d877fa768922f4541a13a9e2e2dce982316cdd3ff641e66f3ac41a7e303e80"} Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.363094 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.463111 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume\") pod \"5f363fe6-c902-4860-a3d9-4e1d39803e17\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.463573 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22sdl\" (UniqueName: \"kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl\") pod \"5f363fe6-c902-4860-a3d9-4e1d39803e17\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.463704 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume\") pod \"5f363fe6-c902-4860-a3d9-4e1d39803e17\" (UID: \"5f363fe6-c902-4860-a3d9-4e1d39803e17\") " Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.463941 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume" (OuterVolumeSpecName: "config-volume") pod "5f363fe6-c902-4860-a3d9-4e1d39803e17" (UID: "5f363fe6-c902-4860-a3d9-4e1d39803e17"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.464325 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f363fe6-c902-4860-a3d9-4e1d39803e17-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.469183 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5f363fe6-c902-4860-a3d9-4e1d39803e17" (UID: "5f363fe6-c902-4860-a3d9-4e1d39803e17"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.472015 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl" (OuterVolumeSpecName: "kube-api-access-22sdl") pod "5f363fe6-c902-4860-a3d9-4e1d39803e17" (UID: "5f363fe6-c902-4860-a3d9-4e1d39803e17"). InnerVolumeSpecName "kube-api-access-22sdl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.566077 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22sdl\" (UniqueName: \"kubernetes.io/projected/5f363fe6-c902-4860-a3d9-4e1d39803e17-kube-api-access-22sdl\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.566110 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f363fe6-c902-4860-a3d9-4e1d39803e17-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.944961 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" event={"ID":"5f363fe6-c902-4860-a3d9-4e1d39803e17","Type":"ContainerDied","Data":"82d877fa768922f4541a13a9e2e2dce982316cdd3ff641e66f3ac41a7e303e80"} Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.945000 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82d877fa768922f4541a13a9e2e2dce982316cdd3ff641e66f3ac41a7e303e80" Oct 13 07:30:03 crc kubenswrapper[4664]: I1013 07:30:03.945036 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb" Oct 13 07:30:04 crc kubenswrapper[4664]: I1013 07:30:04.436688 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"] Oct 13 07:30:04 crc kubenswrapper[4664]: I1013 07:30:04.445463 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338965-nsh49"] Oct 13 07:30:05 crc kubenswrapper[4664]: I1013 07:30:05.068086 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89073c83-c4b0-460e-8011-433081541325" path="/var/lib/kubelet/pods/89073c83-c4b0-460e-8011-433081541325/volumes" Oct 13 07:30:16 crc kubenswrapper[4664]: I1013 07:30:16.046727 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:30:16 crc kubenswrapper[4664]: E1013 07:30:16.047487 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:30:18 crc kubenswrapper[4664]: I1013 07:30:18.809030 4664 scope.go:117] "RemoveContainer" containerID="894ab5deb891e5dfa91d2b388b8ac0e959f15d17893bf720e30f7646bb590257" Oct 13 07:30:28 crc kubenswrapper[4664]: I1013 07:30:28.047769 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:30:28 crc kubenswrapper[4664]: E1013 07:30:28.049072 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 
07:30:41 crc kubenswrapper[4664]: I1013 07:30:41.046961 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:30:41 crc kubenswrapper[4664]: E1013 07:30:41.047784 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:30:45 crc kubenswrapper[4664]: I1013 07:30:45.324601 4664 generic.go:334] "Generic (PLEG): container finished" podID="85584bbe-e8c0-4359-a828-731d3bc2cd5f" containerID="7f110547e037d634a6f8aaf13cf29588e489f43b1b407a07efdad0636d3b9713" exitCode=0 Oct 13 07:30:45 crc kubenswrapper[4664]: I1013 07:30:45.324819 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" event={"ID":"85584bbe-e8c0-4359-a828-731d3bc2cd5f","Type":"ContainerDied","Data":"7f110547e037d634a6f8aaf13cf29588e489f43b1b407a07efdad0636d3b9713"} Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.779351 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.893654 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbt26\" (UniqueName: \"kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.893940 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.893988 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894023 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894051 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894102 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1\") pod 
\"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894194 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894219 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.894240 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.925587 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26" (OuterVolumeSpecName: "kube-api-access-mbt26") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "kube-api-access-mbt26". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.961270 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:46 crc kubenswrapper[4664]: I1013 07:30:46.997779 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:46.998378 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") pod \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\" (UID: \"85584bbe-e8c0-4359-a828-731d3bc2cd5f\") " Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:46.998899 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbt26\" (UniqueName: \"kubernetes.io/projected/85584bbe-e8c0-4359-a828-731d3bc2cd5f-kube-api-access-mbt26\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:46.998912 4664 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: W1013 07:30:47.000115 4664 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/85584bbe-e8c0-4359-a828-731d3bc2cd5f/volumes/kubernetes.io~secret/nova-cell1-compute-config-0 Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.000634 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.009965 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.023032 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.053861 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.076729 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory" (OuterVolumeSpecName: "inventory") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.102367 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.102402 4664 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.102413 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.102421 4664 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.102430 4664 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.118149 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.123007 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "85584bbe-e8c0-4359-a828-731d3bc2cd5f" (UID: "85584bbe-e8c0-4359-a828-731d3bc2cd5f"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.203745 4664 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.203773 4664 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/85584bbe-e8c0-4359-a828-731d3bc2cd5f-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.344744 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" event={"ID":"85584bbe-e8c0-4359-a828-731d3bc2cd5f","Type":"ContainerDied","Data":"fd6d32c1abc2cdb6d77d31e424fb01385efd735e432455f06df4b560773d939b"} Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.344853 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd6d32c1abc2cdb6d77d31e424fb01385efd735e432455f06df4b560773d939b" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.344883 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-qg68p" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.539570 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht"] Oct 13 07:30:47 crc kubenswrapper[4664]: E1013 07:30:47.540157 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f363fe6-c902-4860-a3d9-4e1d39803e17" containerName="collect-profiles" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.540186 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f363fe6-c902-4860-a3d9-4e1d39803e17" containerName="collect-profiles" Oct 13 07:30:47 crc kubenswrapper[4664]: E1013 07:30:47.540214 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85584bbe-e8c0-4359-a828-731d3bc2cd5f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.540222 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="85584bbe-e8c0-4359-a828-731d3bc2cd5f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.540420 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="85584bbe-e8c0-4359-a828-731d3bc2cd5f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.540446 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f363fe6-c902-4860-a3d9-4e1d39803e17" containerName="collect-profiles" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.541071 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.545593 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.545762 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.545935 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.546065 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.546110 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kswsn" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.572248 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht"] Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609439 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6tx5\" (UniqueName: \"kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609495 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609545 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609576 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609660 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc 
kubenswrapper[4664]: I1013 07:30:47.609688 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.609709 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.710871 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.710964 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.710985 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.711056 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6tx5\" (UniqueName: \"kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.711093 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.711151 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.711192 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.716954 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.717028 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.725903 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.726052 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.727136 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.727174 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.729229 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6tx5\" (UniqueName: \"kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zcjht\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") 
" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:47 crc kubenswrapper[4664]: I1013 07:30:47.855776 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:30:48 crc kubenswrapper[4664]: I1013 07:30:48.455490 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht"] Oct 13 07:30:48 crc kubenswrapper[4664]: I1013 07:30:48.469013 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:30:49 crc kubenswrapper[4664]: I1013 07:30:49.364362 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" event={"ID":"618aef5f-9779-425d-9ce0-b827194143f4","Type":"ContainerStarted","Data":"e3217f8d96b3406da2b4fe38a4f3eb3f0808e3a9d723945e3b35325d3fe974b8"} Oct 13 07:30:50 crc kubenswrapper[4664]: I1013 07:30:50.375480 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" event={"ID":"618aef5f-9779-425d-9ce0-b827194143f4","Type":"ContainerStarted","Data":"c5be9398e7fbf6b98e9f6806a0731fa9bb65a9651bc1b40563e897be7a851987"} Oct 13 07:30:50 crc kubenswrapper[4664]: I1013 07:30:50.405699 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" podStartSLOduration=2.635160716 podStartE2EDuration="3.405678581s" podCreationTimestamp="2025-10-13 07:30:47 +0000 UTC" firstStartedPulling="2025-10-13 07:30:48.468716201 +0000 UTC m=+2656.156161403" lastFinishedPulling="2025-10-13 07:30:49.239234066 +0000 UTC m=+2656.926679268" observedRunningTime="2025-10-13 07:30:50.404630703 +0000 UTC m=+2658.092075905" watchObservedRunningTime="2025-10-13 07:30:50.405678581 +0000 UTC m=+2658.093123783" Oct 13 07:30:54 crc kubenswrapper[4664]: I1013 07:30:54.048256 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:30:54 crc kubenswrapper[4664]: E1013 07:30:54.048652 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:31:07 crc kubenswrapper[4664]: I1013 07:31:07.047579 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:31:07 crc kubenswrapper[4664]: E1013 07:31:07.048351 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:31:22 crc kubenswrapper[4664]: I1013 07:31:22.048106 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:31:22 crc kubenswrapper[4664]: E1013 07:31:22.048745 4664 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:31:33 crc kubenswrapper[4664]: I1013 07:31:33.062565 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:31:33 crc kubenswrapper[4664]: I1013 07:31:33.796125 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5"} Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.850994 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.854084 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.869567 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.949756 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.949882 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jp6f\" (UniqueName: \"kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:47 crc kubenswrapper[4664]: I1013 07:32:47.950001 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.051171 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.051414 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.051460 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jp6f\" (UniqueName: \"kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.052089 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.052115 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.071866 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jp6f\" (UniqueName: \"kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f\") pod \"redhat-operators-lmz74\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.181525 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:48 crc kubenswrapper[4664]: I1013 07:32:48.680976 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:32:49 crc kubenswrapper[4664]: I1013 07:32:49.564384 4664 generic.go:334] "Generic (PLEG): container finished" podID="90c29986-c915-4709-9fb8-f1055450cd9e" containerID="c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5" exitCode=0 Oct 13 07:32:49 crc kubenswrapper[4664]: I1013 07:32:49.564855 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerDied","Data":"c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5"} Oct 13 07:32:49 crc kubenswrapper[4664]: I1013 07:32:49.564900 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerStarted","Data":"a8ec1d7cdcd1cf7f1c259a85601b2aa94fed4c897d192643bbc230d2cc6eeb79"} Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.432302 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.434917 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.454467 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.577283 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerStarted","Data":"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc"} Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.611920 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.611986 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.612154 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x66zv\" (UniqueName: \"kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.713628 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.713892 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.714163 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x66zv\" (UniqueName: \"kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.715031 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.715066 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.745610 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x66zv\" (UniqueName: \"kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv\") pod \"redhat-marketplace-6nvzs\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:50 crc kubenswrapper[4664]: I1013 07:32:50.800912 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:32:51 crc kubenswrapper[4664]: I1013 07:32:51.269835 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:32:51 crc kubenswrapper[4664]: I1013 07:32:51.585975 4664 generic.go:334] "Generic (PLEG): container finished" podID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerID="3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56" exitCode=0 Oct 13 07:32:51 crc kubenswrapper[4664]: I1013 07:32:51.586231 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerDied","Data":"3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56"} Oct 13 07:32:51 crc kubenswrapper[4664]: I1013 07:32:51.587412 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerStarted","Data":"fff55a96f64d57df5861f42a24207956b43e363b62e9e5635922a2472d8b82f2"} Oct 13 07:32:52 crc kubenswrapper[4664]: I1013 07:32:52.598671 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerStarted","Data":"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed"} Oct 13 07:32:53 crc kubenswrapper[4664]: I1013 07:32:53.608270 4664 generic.go:334] "Generic (PLEG): container finished" podID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerID="3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed" exitCode=0 Oct 13 07:32:53 crc kubenswrapper[4664]: I1013 07:32:53.608338 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerDied","Data":"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed"} Oct 13 07:32:54 crc kubenswrapper[4664]: I1013 07:32:54.635723 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerStarted","Data":"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f"} Oct 13 07:32:54 crc kubenswrapper[4664]: I1013 07:32:54.638201 4664 generic.go:334] "Generic (PLEG): container finished" podID="90c29986-c915-4709-9fb8-f1055450cd9e" containerID="e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc" exitCode=0 Oct 13 07:32:54 crc kubenswrapper[4664]: I1013 07:32:54.638548 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" 
event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerDied","Data":"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc"} Oct 13 07:32:54 crc kubenswrapper[4664]: I1013 07:32:54.661698 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6nvzs" podStartSLOduration=2.113727528 podStartE2EDuration="4.661669417s" podCreationTimestamp="2025-10-13 07:32:50 +0000 UTC" firstStartedPulling="2025-10-13 07:32:51.587957555 +0000 UTC m=+2779.275402747" lastFinishedPulling="2025-10-13 07:32:54.135899444 +0000 UTC m=+2781.823344636" observedRunningTime="2025-10-13 07:32:54.654086134 +0000 UTC m=+2782.341531346" watchObservedRunningTime="2025-10-13 07:32:54.661669417 +0000 UTC m=+2782.349114609" Oct 13 07:32:55 crc kubenswrapper[4664]: I1013 07:32:55.654059 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerStarted","Data":"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796"} Oct 13 07:32:55 crc kubenswrapper[4664]: I1013 07:32:55.692467 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lmz74" podStartSLOduration=3.142328973 podStartE2EDuration="8.692444925s" podCreationTimestamp="2025-10-13 07:32:47 +0000 UTC" firstStartedPulling="2025-10-13 07:32:49.566605329 +0000 UTC m=+2777.254050521" lastFinishedPulling="2025-10-13 07:32:55.116721241 +0000 UTC m=+2782.804166473" observedRunningTime="2025-10-13 07:32:55.679642521 +0000 UTC m=+2783.367087713" watchObservedRunningTime="2025-10-13 07:32:55.692444925 +0000 UTC m=+2783.379890117" Oct 13 07:32:58 crc kubenswrapper[4664]: I1013 07:32:58.182087 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:58 crc kubenswrapper[4664]: I1013 07:32:58.182539 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:32:59 crc kubenswrapper[4664]: I1013 07:32:59.232093 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lmz74" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" probeResult="failure" output=< Oct 13 07:32:59 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:32:59 crc kubenswrapper[4664]: > Oct 13 07:33:00 crc kubenswrapper[4664]: I1013 07:33:00.801444 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:00 crc kubenswrapper[4664]: I1013 07:33:00.801845 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:00 crc kubenswrapper[4664]: I1013 07:33:00.851716 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:01 crc kubenswrapper[4664]: I1013 07:33:01.749531 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:01 crc kubenswrapper[4664]: I1013 07:33:01.796919 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:33:03 crc kubenswrapper[4664]: I1013 07:33:03.721678 4664 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6nvzs" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="registry-server" containerID="cri-o://b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f" gracePeriod=2 Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.192870 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.285543 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x66zv\" (UniqueName: \"kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv\") pod \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.285672 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content\") pod \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.285781 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities\") pod \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\" (UID: \"fff7037c-8c78-416b-a6a0-fa8eade0d9c8\") " Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.286512 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities" (OuterVolumeSpecName: "utilities") pod "fff7037c-8c78-416b-a6a0-fa8eade0d9c8" (UID: "fff7037c-8c78-416b-a6a0-fa8eade0d9c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.291063 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv" (OuterVolumeSpecName: "kube-api-access-x66zv") pod "fff7037c-8c78-416b-a6a0-fa8eade0d9c8" (UID: "fff7037c-8c78-416b-a6a0-fa8eade0d9c8"). InnerVolumeSpecName "kube-api-access-x66zv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.299734 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fff7037c-8c78-416b-a6a0-fa8eade0d9c8" (UID: "fff7037c-8c78-416b-a6a0-fa8eade0d9c8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.388381 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.388420 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x66zv\" (UniqueName: \"kubernetes.io/projected/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-kube-api-access-x66zv\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.388431 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fff7037c-8c78-416b-a6a0-fa8eade0d9c8-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.738940 4664 generic.go:334] "Generic (PLEG): container finished" podID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerID="b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f" exitCode=0 Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.739022 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerDied","Data":"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f"} Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.739059 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6nvzs" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.739965 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6nvzs" event={"ID":"fff7037c-8c78-416b-a6a0-fa8eade0d9c8","Type":"ContainerDied","Data":"fff55a96f64d57df5861f42a24207956b43e363b62e9e5635922a2472d8b82f2"} Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.739985 4664 scope.go:117] "RemoveContainer" containerID="b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.770688 4664 scope.go:117] "RemoveContainer" containerID="3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.804424 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.815170 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6nvzs"] Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.817364 4664 scope.go:117] "RemoveContainer" containerID="3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.852829 4664 scope.go:117] "RemoveContainer" containerID="b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f" Oct 13 07:33:04 crc kubenswrapper[4664]: E1013 07:33:04.853554 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f\": container with ID starting with b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f not found: ID does not exist" containerID="b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.853608 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f"} err="failed to get container status \"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f\": rpc error: code = NotFound desc = could not find container \"b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f\": container with ID starting with b05dec0b299b5f7993f0e10d7c8b4243f3d3ba55e03cae38cd7d189a07c27f8f not found: ID does not exist" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.853635 4664 scope.go:117] "RemoveContainer" containerID="3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed" Oct 13 07:33:04 crc kubenswrapper[4664]: E1013 07:33:04.854253 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed\": container with ID starting with 3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed not found: ID does not exist" containerID="3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.854289 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed"} err="failed to get container status \"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed\": rpc error: code = NotFound desc = could not find container \"3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed\": container with ID starting with 3986b04b6166ac6a1ef25317df4ea132b0f7fbb8fa5193e33815b48c671940ed not found: ID does not exist" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.854310 4664 scope.go:117] "RemoveContainer" containerID="3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56" Oct 13 07:33:04 crc kubenswrapper[4664]: E1013 07:33:04.854589 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56\": container with ID starting with 3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56 not found: ID does not exist" containerID="3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56" Oct 13 07:33:04 crc kubenswrapper[4664]: I1013 07:33:04.854623 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56"} err="failed to get container status \"3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56\": rpc error: code = NotFound desc = could not find container \"3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56\": container with ID starting with 3c051ebe93a8977169c41f3977d17e7cb4aec39a67e2c8d0e3bea5dfa4291e56 not found: ID does not exist" Oct 13 07:33:05 crc kubenswrapper[4664]: I1013 07:33:05.057564 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" path="/var/lib/kubelet/pods/fff7037c-8c78-416b-a6a0-fa8eade0d9c8/volumes" Oct 13 07:33:09 crc kubenswrapper[4664]: I1013 07:33:09.239236 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lmz74" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" probeResult="failure" output=< Oct 13 07:33:09 crc 
kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:33:09 crc kubenswrapper[4664]: > Oct 13 07:33:18 crc kubenswrapper[4664]: I1013 07:33:18.240608 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:33:18 crc kubenswrapper[4664]: I1013 07:33:18.311557 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:33:19 crc kubenswrapper[4664]: I1013 07:33:19.060156 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:33:19 crc kubenswrapper[4664]: I1013 07:33:19.898236 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lmz74" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" containerID="cri-o://1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796" gracePeriod=2 Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.380773 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.570385 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jp6f\" (UniqueName: \"kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f\") pod \"90c29986-c915-4709-9fb8-f1055450cd9e\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.570767 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content\") pod \"90c29986-c915-4709-9fb8-f1055450cd9e\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.570851 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities\") pod \"90c29986-c915-4709-9fb8-f1055450cd9e\" (UID: \"90c29986-c915-4709-9fb8-f1055450cd9e\") " Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.572289 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities" (OuterVolumeSpecName: "utilities") pod "90c29986-c915-4709-9fb8-f1055450cd9e" (UID: "90c29986-c915-4709-9fb8-f1055450cd9e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.588202 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f" (OuterVolumeSpecName: "kube-api-access-9jp6f") pod "90c29986-c915-4709-9fb8-f1055450cd9e" (UID: "90c29986-c915-4709-9fb8-f1055450cd9e"). InnerVolumeSpecName "kube-api-access-9jp6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.666868 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90c29986-c915-4709-9fb8-f1055450cd9e" (UID: "90c29986-c915-4709-9fb8-f1055450cd9e"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.672852 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jp6f\" (UniqueName: \"kubernetes.io/projected/90c29986-c915-4709-9fb8-f1055450cd9e-kube-api-access-9jp6f\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.672895 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.672905 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90c29986-c915-4709-9fb8-f1055450cd9e-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.908038 4664 generic.go:334] "Generic (PLEG): container finished" podID="90c29986-c915-4709-9fb8-f1055450cd9e" containerID="1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796" exitCode=0 Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.908076 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerDied","Data":"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796"} Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.908100 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmz74" event={"ID":"90c29986-c915-4709-9fb8-f1055450cd9e","Type":"ContainerDied","Data":"a8ec1d7cdcd1cf7f1c259a85601b2aa94fed4c897d192643bbc230d2cc6eeb79"} Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.908118 4664 scope.go:117] "RemoveContainer" containerID="1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.908226 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lmz74" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.943275 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.946278 4664 scope.go:117] "RemoveContainer" containerID="e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc" Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.951133 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lmz74"] Oct 13 07:33:20 crc kubenswrapper[4664]: I1013 07:33:20.968193 4664 scope.go:117] "RemoveContainer" containerID="c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.057989 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" path="/var/lib/kubelet/pods/90c29986-c915-4709-9fb8-f1055450cd9e/volumes" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.061840 4664 scope.go:117] "RemoveContainer" containerID="1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796" Oct 13 07:33:21 crc kubenswrapper[4664]: E1013 07:33:21.062467 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796\": container with ID starting with 1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796 not found: ID does not exist" containerID="1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.062505 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796"} err="failed to get container status \"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796\": rpc error: code = NotFound desc = could not find container \"1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796\": container with ID starting with 1b512510fb3bb81b074e0f27a0c84ff79cb79c27934e55b65e84357524fd4796 not found: ID does not exist" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.062536 4664 scope.go:117] "RemoveContainer" containerID="e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc" Oct 13 07:33:21 crc kubenswrapper[4664]: E1013 07:33:21.062962 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc\": container with ID starting with e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc not found: ID does not exist" containerID="e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.062983 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc"} err="failed to get container status \"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc\": rpc error: code = NotFound desc = could not find container \"e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc\": container with ID starting with e9027c84b981a0ceee85b9e8523e3efeda0121341b2c6e55ab658d22036f15bc not found: ID does not exist" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 
07:33:21.063000 4664 scope.go:117] "RemoveContainer" containerID="c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5" Oct 13 07:33:21 crc kubenswrapper[4664]: E1013 07:33:21.063937 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5\": container with ID starting with c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5 not found: ID does not exist" containerID="c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5" Oct 13 07:33:21 crc kubenswrapper[4664]: I1013 07:33:21.063986 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5"} err="failed to get container status \"c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5\": rpc error: code = NotFound desc = could not find container \"c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5\": container with ID starting with c49525178cf984742f2922c47183ea408c7f5202bc8ebc03840b0a1bcc392dc5 not found: ID does not exist" Oct 13 07:33:45 crc kubenswrapper[4664]: I1013 07:33:45.182469 4664 generic.go:334] "Generic (PLEG): container finished" podID="618aef5f-9779-425d-9ce0-b827194143f4" containerID="c5be9398e7fbf6b98e9f6806a0731fa9bb65a9651bc1b40563e897be7a851987" exitCode=0 Oct 13 07:33:45 crc kubenswrapper[4664]: I1013 07:33:45.182552 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" event={"ID":"618aef5f-9779-425d-9ce0-b827194143f4","Type":"ContainerDied","Data":"c5be9398e7fbf6b98e9f6806a0731fa9bb65a9651bc1b40563e897be7a851987"} Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.637568 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794281 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794581 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794683 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794751 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6tx5\" (UniqueName: \"kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794868 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794935 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.794974 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0\") pod \"618aef5f-9779-425d-9ce0-b827194143f4\" (UID: \"618aef5f-9779-425d-9ce0-b827194143f4\") " Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.815829 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.827632 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5" (OuterVolumeSpecName: "kube-api-access-p6tx5") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). 
InnerVolumeSpecName "kube-api-access-p6tx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.830713 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.835437 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.847675 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.850278 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.856937 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory" (OuterVolumeSpecName: "inventory") pod "618aef5f-9779-425d-9ce0-b827194143f4" (UID: "618aef5f-9779-425d-9ce0-b827194143f4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.902876 4664 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.902938 4664 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.902957 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6tx5\" (UniqueName: \"kubernetes.io/projected/618aef5f-9779-425d-9ce0-b827194143f4-kube-api-access-p6tx5\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.902978 4664 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-inventory\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.902991 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.903008 4664 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:46 crc kubenswrapper[4664]: I1013 07:33:46.903022 4664 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/618aef5f-9779-425d-9ce0-b827194143f4-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Oct 13 07:33:47 crc kubenswrapper[4664]: I1013 07:33:47.202628 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" event={"ID":"618aef5f-9779-425d-9ce0-b827194143f4","Type":"ContainerDied","Data":"e3217f8d96b3406da2b4fe38a4f3eb3f0808e3a9d723945e3b35325d3fe974b8"} Oct 13 07:33:47 crc kubenswrapper[4664]: I1013 07:33:47.202931 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3217f8d96b3406da2b4fe38a4f3eb3f0808e3a9d723945e3b35325d3fe974b8" Oct 13 07:33:47 crc kubenswrapper[4664]: I1013 07:33:47.202738 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zcjht" Oct 13 07:33:58 crc kubenswrapper[4664]: I1013 07:33:58.812537 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:33:58 crc kubenswrapper[4664]: I1013 07:33:58.813135 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:34:28 crc kubenswrapper[4664]: I1013 07:34:28.811861 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:34:28 crc kubenswrapper[4664]: I1013 07:34:28.813018 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.232866 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233451 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="extract-content" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233463 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="extract-content" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233484 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="618aef5f-9779-425d-9ce0-b827194143f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233501 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="618aef5f-9779-425d-9ce0-b827194143f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233536 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233541 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233550 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="extract-utilities" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233556 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="extract-utilities" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233581 4664 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="extract-utilities" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233587 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="extract-utilities" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233599 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233604 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: E1013 07:34:31.233618 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="extract-content" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233624 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="extract-content" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233821 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="90c29986-c915-4709-9fb8-f1055450cd9e" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233840 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="fff7037c-8c78-416b-a6a0-fa8eade0d9c8" containerName="registry-server" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.233848 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="618aef5f-9779-425d-9ce0-b827194143f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.235117 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.249482 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.337941 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.338033 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.338079 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmpvm\" (UniqueName: \"kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.440109 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.440198 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.440241 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmpvm\" (UniqueName: \"kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.440682 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.440734 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.461179 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tmpvm\" (UniqueName: \"kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm\") pod \"certified-operators-d5t28\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:31 crc kubenswrapper[4664]: I1013 07:34:31.574874 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:32 crc kubenswrapper[4664]: I1013 07:34:32.212927 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:32 crc kubenswrapper[4664]: I1013 07:34:32.637962 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f4759cd-5724-480c-9af0-6743c7883462" containerID="325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2" exitCode=0 Oct 13 07:34:32 crc kubenswrapper[4664]: I1013 07:34:32.638049 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerDied","Data":"325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2"} Oct 13 07:34:32 crc kubenswrapper[4664]: I1013 07:34:32.638101 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerStarted","Data":"a3aa4cbd8f1260e734f97848a506c2604e22aa13dbc2390d789d4bce1846e17c"} Oct 13 07:34:33 crc kubenswrapper[4664]: I1013 07:34:33.650535 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerStarted","Data":"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1"} Oct 13 07:34:34 crc kubenswrapper[4664]: I1013 07:34:34.662612 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f4759cd-5724-480c-9af0-6743c7883462" containerID="5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1" exitCode=0 Oct 13 07:34:34 crc kubenswrapper[4664]: I1013 07:34:34.662655 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerDied","Data":"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1"} Oct 13 07:34:35 crc kubenswrapper[4664]: I1013 07:34:35.672231 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerStarted","Data":"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb"} Oct 13 07:34:35 crc kubenswrapper[4664]: I1013 07:34:35.698091 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-d5t28" podStartSLOduration=2.170612935 podStartE2EDuration="4.698073044s" podCreationTimestamp="2025-10-13 07:34:31 +0000 UTC" firstStartedPulling="2025-10-13 07:34:32.641211384 +0000 UTC m=+2880.328656596" lastFinishedPulling="2025-10-13 07:34:35.168671513 +0000 UTC m=+2882.856116705" observedRunningTime="2025-10-13 07:34:35.694612541 +0000 UTC m=+2883.382057733" watchObservedRunningTime="2025-10-13 07:34:35.698073044 +0000 UTC m=+2883.385518236" Oct 13 07:34:41 crc kubenswrapper[4664]: I1013 07:34:41.575196 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:41 crc kubenswrapper[4664]: I1013 07:34:41.576076 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:41 crc kubenswrapper[4664]: I1013 07:34:41.637533 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:41 crc kubenswrapper[4664]: I1013 07:34:41.790828 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:41 crc kubenswrapper[4664]: I1013 07:34:41.879202 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:43 crc kubenswrapper[4664]: I1013 07:34:43.753762 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-d5t28" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="registry-server" containerID="cri-o://c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb" gracePeriod=2 Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.244312 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.325494 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content\") pod \"1f4759cd-5724-480c-9af0-6743c7883462\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.325657 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmpvm\" (UniqueName: \"kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm\") pod \"1f4759cd-5724-480c-9af0-6743c7883462\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.325778 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities\") pod \"1f4759cd-5724-480c-9af0-6743c7883462\" (UID: \"1f4759cd-5724-480c-9af0-6743c7883462\") " Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.326539 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities" (OuterVolumeSpecName: "utilities") pod "1f4759cd-5724-480c-9af0-6743c7883462" (UID: "1f4759cd-5724-480c-9af0-6743c7883462"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.331898 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm" (OuterVolumeSpecName: "kube-api-access-tmpvm") pod "1f4759cd-5724-480c-9af0-6743c7883462" (UID: "1f4759cd-5724-480c-9af0-6743c7883462"). InnerVolumeSpecName "kube-api-access-tmpvm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.371828 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f4759cd-5724-480c-9af0-6743c7883462" (UID: "1f4759cd-5724-480c-9af0-6743c7883462"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.427934 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmpvm\" (UniqueName: \"kubernetes.io/projected/1f4759cd-5724-480c-9af0-6743c7883462-kube-api-access-tmpvm\") on node \"crc\" DevicePath \"\"" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.427972 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.427986 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f4759cd-5724-480c-9af0-6743c7883462-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.769190 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5t28" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.769143 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f4759cd-5724-480c-9af0-6743c7883462" containerID="c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb" exitCode=0 Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.769279 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerDied","Data":"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb"} Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.769353 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5t28" event={"ID":"1f4759cd-5724-480c-9af0-6743c7883462","Type":"ContainerDied","Data":"a3aa4cbd8f1260e734f97848a506c2604e22aa13dbc2390d789d4bce1846e17c"} Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.769376 4664 scope.go:117] "RemoveContainer" containerID="c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.822566 4664 scope.go:117] "RemoveContainer" containerID="5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.853857 4664 scope.go:117] "RemoveContainer" containerID="325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.855751 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.867380 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-d5t28"] Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.885959 4664 scope.go:117] "RemoveContainer" containerID="c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb" Oct 13 07:34:44 crc kubenswrapper[4664]: E1013 07:34:44.886227 4664 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb\": container with ID starting with c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb not found: ID does not exist" containerID="c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.886259 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb"} err="failed to get container status \"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb\": rpc error: code = NotFound desc = could not find container \"c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb\": container with ID starting with c08372209930f16c9e2604e7d766f4ee4de497955d08022ea930f1627489d3fb not found: ID does not exist" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.886281 4664 scope.go:117] "RemoveContainer" containerID="5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1" Oct 13 07:34:44 crc kubenswrapper[4664]: E1013 07:34:44.886463 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1\": container with ID starting with 5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1 not found: ID does not exist" containerID="5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.886491 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1"} err="failed to get container status \"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1\": rpc error: code = NotFound desc = could not find container \"5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1\": container with ID starting with 5fed9f0285d836eb1c85cb22b6c3aee463a17ea4701b08af3cfc573a81c905d1 not found: ID does not exist" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.886504 4664 scope.go:117] "RemoveContainer" containerID="325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2" Oct 13 07:34:44 crc kubenswrapper[4664]: E1013 07:34:44.886684 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2\": container with ID starting with 325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2 not found: ID does not exist" containerID="325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2" Oct 13 07:34:44 crc kubenswrapper[4664]: I1013 07:34:44.886705 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2"} err="failed to get container status \"325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2\": rpc error: code = NotFound desc = could not find container \"325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2\": container with ID starting with 325fea77680edaeb22640610daebe8b89b302f41d4b222a1bcccaeb6596aa2b2 not found: ID does not exist" Oct 13 07:34:45 crc kubenswrapper[4664]: I1013 07:34:45.058201 4664 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="1f4759cd-5724-480c-9af0-6743c7883462" path="/var/lib/kubelet/pods/1f4759cd-5724-480c-9af0-6743c7883462/volumes" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.215180 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Oct 13 07:34:51 crc kubenswrapper[4664]: E1013 07:34:51.216422 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="extract-utilities" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.216447 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="extract-utilities" Oct 13 07:34:51 crc kubenswrapper[4664]: E1013 07:34:51.216480 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="extract-content" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.216492 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="extract-content" Oct 13 07:34:51 crc kubenswrapper[4664]: E1013 07:34:51.216553 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="registry-server" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.216568 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="registry-server" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.216894 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f4759cd-5724-480c-9af0-6743c7883462" containerName="registry-server" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.217944 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.220402 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.220848 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.222644 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.224430 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-zpzpx" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.230854 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.274696 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.274773 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.274960 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275044 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275145 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275198 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config\") pod 
\"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275295 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n24d\" (UniqueName: \"kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275335 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.275425 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377289 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377403 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377447 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377486 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377519 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " 
pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377554 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377578 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377604 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n24d\" (UniqueName: \"kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.377622 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.378444 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.378680 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.378691 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.378826 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " 
pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.379002 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.385343 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.385594 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.386114 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.399002 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n24d\" (UniqueName: \"kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.406531 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:51 crc kubenswrapper[4664]: I1013 07:34:51.553686 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:34:52 crc kubenswrapper[4664]: I1013 07:34:52.132022 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Oct 13 07:34:52 crc kubenswrapper[4664]: I1013 07:34:52.868436 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"67f690b7-0671-4a15-9d4d-1c65126e8a9a","Type":"ContainerStarted","Data":"baecad2c17351bbf6195218e671ad414c5d56f41cf71af248177cc81e0f4c7d5"} Oct 13 07:34:58 crc kubenswrapper[4664]: I1013 07:34:58.811634 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:34:58 crc kubenswrapper[4664]: I1013 07:34:58.812194 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:34:58 crc kubenswrapper[4664]: I1013 07:34:58.812235 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:34:58 crc kubenswrapper[4664]: I1013 07:34:58.812949 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:34:58 crc kubenswrapper[4664]: I1013 07:34:58.813015 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5" gracePeriod=600 Oct 13 07:34:59 crc kubenswrapper[4664]: I1013 07:34:59.945322 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5" exitCode=0 Oct 13 07:34:59 crc kubenswrapper[4664]: I1013 07:34:59.945396 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5"} Oct 13 07:34:59 crc kubenswrapper[4664]: I1013 07:34:59.945900 4664 scope.go:117] "RemoveContainer" containerID="c39dd829bf2f8a3276edd6b57e3c215025aca35c839db8d7d51bc884d16d0f4c" Oct 13 07:35:02 crc kubenswrapper[4664]: I1013 07:35:02.990012 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"} Oct 13 07:35:40 crc kubenswrapper[4664]: E1013 07:35:40.032396 
4664 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:92672cd85fd36317d65faa0525acf849"
Oct 13 07:35:40 crc kubenswrapper[4664]: E1013 07:35:40.033166 4664 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:92672cd85fd36317d65faa0525acf849"
Oct 13 07:35:40 crc kubenswrapper[4664]: E1013 07:35:40.046099 4664 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:92672cd85fd36317d65faa0525acf849,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4n24d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest-s00-multi-thread-testing_openstack(67f690b7-0671-4a15-9d4d-1c65126e8a9a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 13 07:35:40 crc kubenswrapper[4664]: E1013 07:35:40.047968 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="67f690b7-0671-4a15-9d4d-1c65126e8a9a"
Oct 13 07:35:40 crc kubenswrapper[4664]: E1013 07:35:40.417079 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:92672cd85fd36317d65faa0525acf849\\\"\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="67f690b7-0671-4a15-9d4d-1c65126e8a9a"
Oct 13 07:35:56 crc kubenswrapper[4664]: I1013 07:35:56.048988 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 13 07:35:56 crc kubenswrapper[4664]: I1013 07:35:56.275941 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Oct 13 07:35:58 crc kubenswrapper[4664]: I1013 07:35:58.599545 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"67f690b7-0671-4a15-9d4d-1c65126e8a9a","Type":"ContainerStarted","Data":"6849ef9f6c082e8ad06890c2d64096d555bc842a91c14029b17ceb2088238852"}
Oct 13 07:35:58 crc kubenswrapper[4664]: I1013 07:35:58.635269 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podStartSLOduration=4.502416256 podStartE2EDuration="1m8.635249811s" podCreationTimestamp="2025-10-13 07:34:50 +0000 UTC" firstStartedPulling="2025-10-13 07:34:52.13998278 +0000 UTC m=+2899.827427972" lastFinishedPulling="2025-10-13 07:35:56.272816325 +0000 UTC m=+2963.960261527" observedRunningTime="2025-10-13 07:35:58.624651787 +0000 UTC m=+2966.312096999" watchObservedRunningTime="2025-10-13 07:35:58.635249811 +0000 UTC m=+2966.322695003"
Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.774273 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5hrqb"]
Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.778060 4664 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.896099 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzjmb\" (UniqueName: \"kubernetes.io/projected/3960f54e-4fe8-4198-adf1-3aea88880c0f-kube-api-access-xzjmb\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.896299 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-catalog-content\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.896756 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-utilities\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.998346 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-utilities\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.998427 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzjmb\" (UniqueName: \"kubernetes.io/projected/3960f54e-4fe8-4198-adf1-3aea88880c0f-kube-api-access-xzjmb\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.998510 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-catalog-content\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.998986 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-catalog-content\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:23 crc kubenswrapper[4664]: I1013 07:37:23.999582 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3960f54e-4fe8-4198-adf1-3aea88880c0f-utilities\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:24 crc kubenswrapper[4664]: I1013 07:37:24.031302 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5hrqb"] Oct 13 07:37:24 crc kubenswrapper[4664]: I1013 07:37:24.051038 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xzjmb\" (UniqueName: \"kubernetes.io/projected/3960f54e-4fe8-4198-adf1-3aea88880c0f-kube-api-access-xzjmb\") pod \"community-operators-5hrqb\" (UID: \"3960f54e-4fe8-4198-adf1-3aea88880c0f\") " pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:24 crc kubenswrapper[4664]: I1013 07:37:24.102333 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:26 crc kubenswrapper[4664]: I1013 07:37:26.266548 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5hrqb"] Oct 13 07:37:26 crc kubenswrapper[4664]: I1013 07:37:26.484636 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hrqb" event={"ID":"3960f54e-4fe8-4198-adf1-3aea88880c0f","Type":"ContainerStarted","Data":"4438f0ce83eda3cf03726b5bd975ba1a84cb9a63249fefe24ea1d26666609697"} Oct 13 07:37:27 crc kubenswrapper[4664]: I1013 07:37:27.493739 4664 generic.go:334] "Generic (PLEG): container finished" podID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerID="0971f5d664b9a47aac523bedd2d587655fc75d68baa9856eb89f0f619ec815e2" exitCode=0 Oct 13 07:37:27 crc kubenswrapper[4664]: I1013 07:37:27.493784 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hrqb" event={"ID":"3960f54e-4fe8-4198-adf1-3aea88880c0f","Type":"ContainerDied","Data":"0971f5d664b9a47aac523bedd2d587655fc75d68baa9856eb89f0f619ec815e2"} Oct 13 07:37:28 crc kubenswrapper[4664]: I1013 07:37:28.812414 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:37:28 crc kubenswrapper[4664]: I1013 07:37:28.812833 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:37:32 crc kubenswrapper[4664]: I1013 07:37:32.546225 4664 generic.go:334] "Generic (PLEG): container finished" podID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerID="226659d680b3dbb2b32bb7f07a953289a90e8670c470f4ae13fb9132621ce0c6" exitCode=0 Oct 13 07:37:32 crc kubenswrapper[4664]: I1013 07:37:32.546401 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hrqb" event={"ID":"3960f54e-4fe8-4198-adf1-3aea88880c0f","Type":"ContainerDied","Data":"226659d680b3dbb2b32bb7f07a953289a90e8670c470f4ae13fb9132621ce0c6"} Oct 13 07:37:33 crc kubenswrapper[4664]: I1013 07:37:33.560450 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hrqb" event={"ID":"3960f54e-4fe8-4198-adf1-3aea88880c0f","Type":"ContainerStarted","Data":"1f9d5018b8ba439093f8fed7bce828ed589057d979019c436eac178941e28751"} Oct 13 07:37:33 crc kubenswrapper[4664]: I1013 07:37:33.606349 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5hrqb" podStartSLOduration=4.894214076 podStartE2EDuration="10.606325139s" podCreationTimestamp="2025-10-13 07:37:23 +0000 UTC" 
firstStartedPulling="2025-10-13 07:37:27.495925157 +0000 UTC m=+3055.183370349" lastFinishedPulling="2025-10-13 07:37:33.20803622 +0000 UTC m=+3060.895481412" observedRunningTime="2025-10-13 07:37:33.584053521 +0000 UTC m=+3061.271498713" watchObservedRunningTime="2025-10-13 07:37:33.606325139 +0000 UTC m=+3061.293770351" Oct 13 07:37:34 crc kubenswrapper[4664]: I1013 07:37:34.103525 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:34 crc kubenswrapper[4664]: I1013 07:37:34.104101 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:35 crc kubenswrapper[4664]: I1013 07:37:35.147733 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=< Oct 13 07:37:35 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:37:35 crc kubenswrapper[4664]: > Oct 13 07:37:45 crc kubenswrapper[4664]: I1013 07:37:45.179442 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=< Oct 13 07:37:45 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:37:45 crc kubenswrapper[4664]: > Oct 13 07:37:54 crc kubenswrapper[4664]: I1013 07:37:54.167602 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:54 crc kubenswrapper[4664]: I1013 07:37:54.229509 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5hrqb" Oct 13 07:37:54 crc kubenswrapper[4664]: I1013 07:37:54.819296 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5hrqb"] Oct 13 07:37:54 crc kubenswrapper[4664]: I1013 07:37:54.987449 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 07:37:54 crc kubenswrapper[4664]: I1013 07:37:54.987690 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vdjxj" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="registry-server" containerID="cri-o://816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9" gracePeriod=2 Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.555269 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.667387 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities\") pod \"389566e0-cafb-4bd2-aa8b-76efccfa1048\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.667602 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content\") pod \"389566e0-cafb-4bd2-aa8b-76efccfa1048\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.667633 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpdhq\" (UniqueName: \"kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq\") pod \"389566e0-cafb-4bd2-aa8b-76efccfa1048\" (UID: \"389566e0-cafb-4bd2-aa8b-76efccfa1048\") " Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.668949 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities" (OuterVolumeSpecName: "utilities") pod "389566e0-cafb-4bd2-aa8b-76efccfa1048" (UID: "389566e0-cafb-4bd2-aa8b-76efccfa1048"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.690865 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq" (OuterVolumeSpecName: "kube-api-access-fpdhq") pod "389566e0-cafb-4bd2-aa8b-76efccfa1048" (UID: "389566e0-cafb-4bd2-aa8b-76efccfa1048"). InnerVolumeSpecName "kube-api-access-fpdhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.724945 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "389566e0-cafb-4bd2-aa8b-76efccfa1048" (UID: "389566e0-cafb-4bd2-aa8b-76efccfa1048"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.769546 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.769579 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpdhq\" (UniqueName: \"kubernetes.io/projected/389566e0-cafb-4bd2-aa8b-76efccfa1048-kube-api-access-fpdhq\") on node \"crc\" DevicePath \"\"" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.769591 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/389566e0-cafb-4bd2-aa8b-76efccfa1048-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.824442 4664 generic.go:334] "Generic (PLEG): container finished" podID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerID="816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9" exitCode=0 Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.824504 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdjxj" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.824532 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerDied","Data":"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9"} Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.824580 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdjxj" event={"ID":"389566e0-cafb-4bd2-aa8b-76efccfa1048","Type":"ContainerDied","Data":"e1e51707cfa66cfc058bc6d176e005c4ec6eff45013e3e5c459fa4f2f99dec3a"} Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.824596 4664 scope.go:117] "RemoveContainer" containerID="816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.849586 4664 scope.go:117] "RemoveContainer" containerID="a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.860287 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.870375 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vdjxj"] Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.883172 4664 scope.go:117] "RemoveContainer" containerID="157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.915522 4664 scope.go:117] "RemoveContainer" containerID="816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9" Oct 13 07:37:55 crc kubenswrapper[4664]: E1013 07:37:55.916121 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9\": container with ID starting with 816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9 not found: ID does not exist" containerID="816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.916178 
4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9"} err="failed to get container status \"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9\": rpc error: code = NotFound desc = could not find container \"816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9\": container with ID starting with 816fdc20ab11ceee9ad7d85b165b1b7a8e47bfd504afb2cc20d4d898977809a9 not found: ID does not exist" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.916208 4664 scope.go:117] "RemoveContainer" containerID="a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873" Oct 13 07:37:55 crc kubenswrapper[4664]: E1013 07:37:55.916641 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873\": container with ID starting with a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873 not found: ID does not exist" containerID="a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.916665 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873"} err="failed to get container status \"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873\": rpc error: code = NotFound desc = could not find container \"a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873\": container with ID starting with a3ac85996d8a27442d6213e679c0dfbfa94b7e6acb0ff8fdb3549a5986566873 not found: ID does not exist" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.916696 4664 scope.go:117] "RemoveContainer" containerID="157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37" Oct 13 07:37:55 crc kubenswrapper[4664]: E1013 07:37:55.917040 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37\": container with ID starting with 157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37 not found: ID does not exist" containerID="157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37" Oct 13 07:37:55 crc kubenswrapper[4664]: I1013 07:37:55.917077 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37"} err="failed to get container status \"157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37\": rpc error: code = NotFound desc = could not find container \"157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37\": container with ID starting with 157610987bf39d458f62332cac5ae88d0f35906ade431860b449da76b3fbfa37 not found: ID does not exist" Oct 13 07:37:57 crc kubenswrapper[4664]: I1013 07:37:57.061032 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" path="/var/lib/kubelet/pods/389566e0-cafb-4bd2-aa8b-76efccfa1048/volumes" Oct 13 07:37:58 crc kubenswrapper[4664]: I1013 07:37:58.811446 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
Oct 13 07:37:58 crc kubenswrapper[4664]: I1013 07:37:58.811765 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 13 07:38:28 crc kubenswrapper[4664]: I1013 07:38:28.812136 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 13 07:38:28 crc kubenswrapper[4664]: I1013 07:38:28.812702 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 13 07:38:28 crc kubenswrapper[4664]: I1013 07:38:28.812752 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl"
Oct 13 07:38:28 crc kubenswrapper[4664]: I1013 07:38:28.813762 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 13 07:38:28 crc kubenswrapper[4664]: I1013 07:38:28.813886 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" gracePeriod=600
Oct 13 07:38:28 crc kubenswrapper[4664]: E1013 07:38:28.954238 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:38:29 crc kubenswrapper[4664]: I1013 07:38:29.177847 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"}
Oct 13 07:38:29 crc kubenswrapper[4664]: I1013 07:38:29.177898 4664 scope.go:117] "RemoveContainer" containerID="723a02a09e20f9e931cbe7400a3d4141c0cb5752da29e5cdf49ea790c6e70ae5"
Oct 13 07:38:29 crc kubenswrapper[4664]: I1013 07:38:29.177787 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" exitCode=0
Oct 13 07:38:29 crc kubenswrapper[4664]: I1013 07:38:29.178552 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:38:29 crc kubenswrapper[4664]: E1013 07:38:29.178819 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:38:44 crc kubenswrapper[4664]: I1013 07:38:44.047328 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:38:44 crc kubenswrapper[4664]: E1013 07:38:44.048063 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:38:55 crc kubenswrapper[4664]: I1013 07:38:55.047425 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:38:55 crc kubenswrapper[4664]: E1013 07:38:55.048020 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:39:08 crc kubenswrapper[4664]: I1013 07:39:08.046780 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:39:08 crc kubenswrapper[4664]: E1013 07:39:08.047601 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:39:20 crc kubenswrapper[4664]: I1013 07:39:20.047502 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:39:20 crc kubenswrapper[4664]: E1013 07:39:20.048491 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:39:32 crc kubenswrapper[4664]: I1013 07:39:32.048238 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:39:32 crc kubenswrapper[4664]: E1013 07:39:32.048826 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:39:46 crc kubenswrapper[4664]: I1013 07:39:46.048131 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:39:46 crc kubenswrapper[4664]: E1013 07:39:46.048880 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:39:57 crc kubenswrapper[4664]: I1013 07:39:57.047550 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:39:57 crc kubenswrapper[4664]: E1013 07:39:57.048458 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:40:11 crc kubenswrapper[4664]: I1013 07:40:11.047874 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:40:11 crc kubenswrapper[4664]: E1013 07:40:11.048734 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:40:23 crc kubenswrapper[4664]: I1013 07:40:23.058523 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:40:23 crc kubenswrapper[4664]: E1013 07:40:23.059747 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:40:36 crc kubenswrapper[4664]: I1013 07:40:36.048028 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:40:36 crc kubenswrapper[4664]: E1013 07:40:36.049729 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:40:49 crc kubenswrapper[4664]: I1013 07:40:49.047445 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:40:49 crc kubenswrapper[4664]: E1013 07:40:49.048446 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:41:00 crc kubenswrapper[4664]: I1013 07:41:00.048265 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:41:00 crc kubenswrapper[4664]: E1013 07:41:00.049294 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:41:11 crc kubenswrapper[4664]: I1013 07:41:11.046826 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:41:11 crc kubenswrapper[4664]: E1013 07:41:11.047760 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:41:25 crc kubenswrapper[4664]: I1013 07:41:25.048590 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:41:25 crc kubenswrapper[4664]: E1013 07:41:25.049418 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:41:37 crc kubenswrapper[4664]: I1013 07:41:37.051503 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:41:37 crc kubenswrapper[4664]: E1013 07:41:37.052152 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:41:51 crc kubenswrapper[4664]: I1013 07:41:51.064284 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:41:51 crc kubenswrapper[4664]: E1013 07:41:51.065259 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:42:02 crc kubenswrapper[4664]: I1013 07:42:02.046777 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:42:02 crc kubenswrapper[4664]: E1013 07:42:02.047488 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:42:17 crc kubenswrapper[4664]: I1013 07:42:17.047561 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:42:17 crc kubenswrapper[4664]: E1013 07:42:17.048856 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:42:32 crc kubenswrapper[4664]: I1013 07:42:32.047702 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:42:32 crc kubenswrapper[4664]: E1013 07:42:32.048687 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:42:43 crc kubenswrapper[4664]: I1013 07:42:43.079623 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:42:43 crc kubenswrapper[4664]: E1013 07:42:43.080561 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:42:58 crc kubenswrapper[4664]: I1013 07:42:58.047684 4664 
scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:42:58 crc kubenswrapper[4664]: E1013 07:42:58.048556 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.047808 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:43:10 crc kubenswrapper[4664]: E1013 07:43:10.049474 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.337687 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"] Oct 13 07:43:10 crc kubenswrapper[4664]: E1013 07:43:10.342649 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="registry-server" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.342675 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="registry-server" Oct 13 07:43:10 crc kubenswrapper[4664]: E1013 07:43:10.343034 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="extract-content" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.343051 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="extract-content" Oct 13 07:43:10 crc kubenswrapper[4664]: E1013 07:43:10.343104 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="extract-utilities" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.343112 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="extract-utilities" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.344381 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="389566e0-cafb-4bd2-aa8b-76efccfa1048" containerName="registry-server" Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.353919 4664 util.go:30] "No sandbox for pod can be found. 
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.364346 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"]
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.453438 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.453788 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4l5v\" (UniqueName: \"kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.453851 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.559243 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.559300 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4l5v\" (UniqueName: \"kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.559328 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.570933 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.572278 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.628504 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4l5v\" (UniqueName: \"kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v\") pod \"redhat-marketplace-k5tst\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") " pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:10 crc kubenswrapper[4664]: I1013 07:43:10.680487 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:12 crc kubenswrapper[4664]: I1013 07:43:12.060351 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"]
Oct 13 07:43:12 crc kubenswrapper[4664]: W1013 07:43:12.095433 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cc6111e_938b_44d3_ad1e_f7fcbc2d9c68.slice/crio-5e8134435208ec87bb9732eb7ed39f9f0a094d1abb76baa720319d38ed72863d WatchSource:0}: Error finding container 5e8134435208ec87bb9732eb7ed39f9f0a094d1abb76baa720319d38ed72863d: Status 404 returned error can't find the container with id 5e8134435208ec87bb9732eb7ed39f9f0a094d1abb76baa720319d38ed72863d
Oct 13 07:43:12 crc kubenswrapper[4664]: I1013 07:43:12.888210 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerDied","Data":"be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6"}
Oct 13 07:43:12 crc kubenswrapper[4664]: I1013 07:43:12.888480 4664 generic.go:334] "Generic (PLEG): container finished" podID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerID="be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6" exitCode=0
Oct 13 07:43:12 crc kubenswrapper[4664]: I1013 07:43:12.888525 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerStarted","Data":"5e8134435208ec87bb9732eb7ed39f9f0a094d1abb76baa720319d38ed72863d"}
Oct 13 07:43:12 crc kubenswrapper[4664]: I1013 07:43:12.896309 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 13 07:43:13 crc kubenswrapper[4664]: I1013 07:43:13.897933 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerStarted","Data":"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7"}
Oct 13 07:43:14 crc kubenswrapper[4664]: I1013 07:43:14.906838 4664 generic.go:334] "Generic (PLEG): container finished" podID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerID="759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7" exitCode=0
Oct 13 07:43:14 crc kubenswrapper[4664]: I1013 07:43:14.906896 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerDied","Data":"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7"}
Oct 13 07:43:15 crc kubenswrapper[4664]: I1013 07:43:15.921431 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerStarted","Data":"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d"}
Oct 13 07:43:15 crc kubenswrapper[4664]: I1013 07:43:15.943574 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k5tst" podStartSLOduration=3.5217841720000003 podStartE2EDuration="5.942271246s" podCreationTimestamp="2025-10-13 07:43:10 +0000 UTC" firstStartedPulling="2025-10-13 07:43:12.890036368 +0000 UTC m=+3400.577481570" lastFinishedPulling="2025-10-13 07:43:15.310523452 +0000 UTC m=+3402.997968644" observedRunningTime="2025-10-13 07:43:15.935716179 +0000 UTC m=+3403.623161381" watchObservedRunningTime="2025-10-13 07:43:15.942271246 +0000 UTC m=+3403.629716438"
Oct 13 07:43:20 crc kubenswrapper[4664]: I1013 07:43:20.682402 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:20 crc kubenswrapper[4664]: I1013 07:43:20.683116 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:20 crc kubenswrapper[4664]: I1013 07:43:20.732385 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:21 crc kubenswrapper[4664]: I1013 07:43:21.020787 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:21 crc kubenswrapper[4664]: I1013 07:43:21.075457 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"]
Oct 13 07:43:22 crc kubenswrapper[4664]: I1013 07:43:22.051227 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19"
Oct 13 07:43:22 crc kubenswrapper[4664]: E1013 07:43:22.052502 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:43:22 crc kubenswrapper[4664]: I1013 07:43:22.981530 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-k5tst" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="registry-server" containerID="cri-o://542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d" gracePeriod=2
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.661056 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5tst"
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.742619 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content\") pod \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") "
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.742699 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4l5v\" (UniqueName: \"kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v\") pod \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") "
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.742888 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities\") pod \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\" (UID: \"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68\") "
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.749464 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities" (OuterVolumeSpecName: "utilities") pod "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" (UID: "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.763817 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" (UID: "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.799918 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v" (OuterVolumeSpecName: "kube-api-access-x4l5v") pod "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" (UID: "5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68"). InnerVolumeSpecName "kube-api-access-x4l5v". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.845499 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.845679 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4l5v\" (UniqueName: \"kubernetes.io/projected/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-kube-api-access-x4l5v\") on node \"crc\" DevicePath \"\"" Oct 13 07:43:23 crc kubenswrapper[4664]: I1013 07:43:23.845778 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.004888 4664 generic.go:334] "Generic (PLEG): container finished" podID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerID="542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d" exitCode=0 Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.004949 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerDied","Data":"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d"} Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.004955 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5tst" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.004990 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5tst" event={"ID":"5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68","Type":"ContainerDied","Data":"5e8134435208ec87bb9732eb7ed39f9f0a094d1abb76baa720319d38ed72863d"} Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.005009 4664 scope.go:117] "RemoveContainer" containerID="542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.050446 4664 scope.go:117] "RemoveContainer" containerID="759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.055898 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"] Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.064397 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5tst"] Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.079265 4664 scope.go:117] "RemoveContainer" containerID="be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.147225 4664 scope.go:117] "RemoveContainer" containerID="542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d" Oct 13 07:43:24 crc kubenswrapper[4664]: E1013 07:43:24.156493 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d\": container with ID starting with 542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d not found: ID does not exist" containerID="542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.157752 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d"} err="failed to get container status \"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d\": rpc error: code = NotFound desc = could not find container \"542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d\": container with ID starting with 542189ed4a0f1f0000624f300a84243fcb62c78e4f47846c8c1ca37ba6eb1c7d not found: ID does not exist" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.157808 4664 scope.go:117] "RemoveContainer" containerID="759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7" Oct 13 07:43:24 crc kubenswrapper[4664]: E1013 07:43:24.160498 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7\": container with ID starting with 759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7 not found: ID does not exist" containerID="759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.160535 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7"} err="failed to get container status \"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7\": rpc error: code = NotFound desc = could not find container \"759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7\": container with ID starting with 759f1bf40aff4c1ae355ad22fb42108e90938cc01d000573070339650e7053e7 not found: ID does not exist" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.160558 4664 scope.go:117] "RemoveContainer" containerID="be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6" Oct 13 07:43:24 crc kubenswrapper[4664]: E1013 07:43:24.161003 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6\": container with ID starting with be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6 not found: ID does not exist" containerID="be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6" Oct 13 07:43:24 crc kubenswrapper[4664]: I1013 07:43:24.161033 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6"} err="failed to get container status \"be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6\": rpc error: code = NotFound desc = could not find container \"be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6\": container with ID starting with be3eea591bb1398ff7914463ee19a375a54f04aef09118232210d04f43670bb6 not found: ID does not exist" Oct 13 07:43:25 crc kubenswrapper[4664]: I1013 07:43:25.059511 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" path="/var/lib/kubelet/pods/5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68/volumes" Oct 13 07:43:35 crc kubenswrapper[4664]: I1013 07:43:35.046490 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:43:36 crc kubenswrapper[4664]: I1013 07:43:36.117444 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad"} Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.113169 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:44:42 crc kubenswrapper[4664]: E1013 07:44:42.114116 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="extract-content" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.114131 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="extract-content" Oct 13 07:44:42 crc kubenswrapper[4664]: E1013 07:44:42.114154 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="registry-server" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.114160 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="registry-server" Oct 13 07:44:42 crc kubenswrapper[4664]: E1013 07:44:42.114180 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="extract-utilities" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.114187 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="extract-utilities" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.114371 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cc6111e-938b-44d3-ad1e-f7fcbc2d9c68" containerName="registry-server" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.116335 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.131669 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.236104 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.236179 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.236255 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-699wb\" (UniqueName: \"kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.338036 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.338109 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.338202 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-699wb\" (UniqueName: \"kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.342983 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.344285 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.365109 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-699wb\" (UniqueName: \"kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb\") pod \"redhat-operators-dpvmf\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:42 crc kubenswrapper[4664]: I1013 07:44:42.444931 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:43 crc kubenswrapper[4664]: I1013 07:44:43.439971 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:44:43 crc kubenswrapper[4664]: W1013 07:44:43.453358 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6a06062_3ef8_4278_a633_f1158d79a843.slice/crio-a2291c2cf8bb3c260be568d19b5a5ac4ba1e4e59336cd13fc2003730078dfa9f WatchSource:0}: Error finding container a2291c2cf8bb3c260be568d19b5a5ac4ba1e4e59336cd13fc2003730078dfa9f: Status 404 returned error can't find the container with id a2291c2cf8bb3c260be568d19b5a5ac4ba1e4e59336cd13fc2003730078dfa9f Oct 13 07:44:43 crc kubenswrapper[4664]: I1013 07:44:43.767550 4664 generic.go:334] "Generic (PLEG): container finished" podID="a6a06062-3ef8-4278-a633-f1158d79a843" containerID="ab318a3e3bf5ad05bc492bc77a4ac94dbbe8fac2a1c0c654623637eea2a4789b" exitCode=0 Oct 13 07:44:43 crc kubenswrapper[4664]: I1013 07:44:43.767624 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerDied","Data":"ab318a3e3bf5ad05bc492bc77a4ac94dbbe8fac2a1c0c654623637eea2a4789b"} Oct 13 07:44:43 crc kubenswrapper[4664]: I1013 07:44:43.767877 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerStarted","Data":"a2291c2cf8bb3c260be568d19b5a5ac4ba1e4e59336cd13fc2003730078dfa9f"} Oct 13 07:44:45 crc kubenswrapper[4664]: I1013 07:44:45.789072 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerStarted","Data":"bd5752a7c269c796e0b27e3067421aa92e205eedaf6bc7c1b8d445ed1b99e9d7"} Oct 13 07:44:48 crc kubenswrapper[4664]: I1013 07:44:48.810805 4664 generic.go:334] "Generic (PLEG): container finished" podID="a6a06062-3ef8-4278-a633-f1158d79a843" containerID="bd5752a7c269c796e0b27e3067421aa92e205eedaf6bc7c1b8d445ed1b99e9d7" exitCode=0 Oct 13 07:44:48 crc kubenswrapper[4664]: I1013 07:44:48.810910 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerDied","Data":"bd5752a7c269c796e0b27e3067421aa92e205eedaf6bc7c1b8d445ed1b99e9d7"} Oct 13 07:44:49 crc kubenswrapper[4664]: I1013 07:44:49.822502 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerStarted","Data":"42618cd901a15851321267a2b2141dceb6f48ea479dc98e1f1cc507bd6789558"} Oct 13 07:44:49 crc kubenswrapper[4664]: I1013 07:44:49.849603 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dpvmf" podStartSLOduration=2.301041047 podStartE2EDuration="7.847882598s" 
podCreationTimestamp="2025-10-13 07:44:42 +0000 UTC" firstStartedPulling="2025-10-13 07:44:43.769740951 +0000 UTC m=+3491.457186153" lastFinishedPulling="2025-10-13 07:44:49.316582512 +0000 UTC m=+3497.004027704" observedRunningTime="2025-10-13 07:44:49.843020606 +0000 UTC m=+3497.530465808" watchObservedRunningTime="2025-10-13 07:44:49.847882598 +0000 UTC m=+3497.535327790" Oct 13 07:44:52 crc kubenswrapper[4664]: I1013 07:44:52.446141 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:52 crc kubenswrapper[4664]: I1013 07:44:52.446490 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:44:53 crc kubenswrapper[4664]: I1013 07:44:53.510486 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" probeResult="failure" output=< Oct 13 07:44:53 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:44:53 crc kubenswrapper[4664]: > Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.430314 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"] Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.432471 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.456495 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"] Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.474254 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.486287 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdcqm\" (UniqueName: \"kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.486455 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.486521 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-secret-volume\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.513280 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.588654 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdcqm\" (UniqueName: \"kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.588770 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.606397 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.630485 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-secret-volume\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.631669 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdcqm\" (UniqueName: \"kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm\") pod \"collect-profiles-29339025-2q269\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:00 crc kubenswrapper[4664]: I1013 07:45:00.781493 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Oct 13 07:45:01 crc kubenswrapper[4664]: I1013 07:45:01.445071 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"]
Oct 13 07:45:01 crc kubenswrapper[4664]: I1013 07:45:01.920253 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" event={"ID":"90a5f03e-7c3b-4264-ae15-83ce4dd9d890","Type":"ContainerStarted","Data":"a86c63121fd104cc0b20de960528c4dc7adba56889369380e4af08df22101756"}
Oct 13 07:45:01 crc kubenswrapper[4664]: I1013 07:45:01.920568 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" event={"ID":"90a5f03e-7c3b-4264-ae15-83ce4dd9d890","Type":"ContainerStarted","Data":"862b02f9e853816bc333c7d968dd6a20e2b44eeb5985575ca16978b158557ac3"}
Oct 13 07:45:01 crc kubenswrapper[4664]: I1013 07:45:01.941724 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" podStartSLOduration=1.941705846 podStartE2EDuration="1.941705846s" podCreationTimestamp="2025-10-13 07:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:45:01.931966633 +0000 UTC m=+3509.619411815" watchObservedRunningTime="2025-10-13 07:45:01.941705846 +0000 UTC m=+3509.629151038"
Oct 13 07:45:02 crc kubenswrapper[4664]: I1013 07:45:02.937956 4664 generic.go:334] "Generic (PLEG): container finished" podID="90a5f03e-7c3b-4264-ae15-83ce4dd9d890" containerID="a86c63121fd104cc0b20de960528c4dc7adba56889369380e4af08df22101756" exitCode=0
Oct 13 07:45:02 crc kubenswrapper[4664]: I1013 07:45:02.938294 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" event={"ID":"90a5f03e-7c3b-4264-ae15-83ce4dd9d890","Type":"ContainerDied","Data":"a86c63121fd104cc0b20de960528c4dc7adba56889369380e4af08df22101756"}
Oct 13 07:45:03 crc kubenswrapper[4664]: I1013 07:45:03.497445 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:45:03 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:45:03 crc kubenswrapper[4664]: >
Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.526330 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.674586 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume\") pod \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.674647 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-secret-volume\") pod \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.674753 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdcqm\" (UniqueName: \"kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm\") pod \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\" (UID: \"90a5f03e-7c3b-4264-ae15-83ce4dd9d890\") " Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.696563 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume" (OuterVolumeSpecName: "config-volume") pod "90a5f03e-7c3b-4264-ae15-83ce4dd9d890" (UID: "90a5f03e-7c3b-4264-ae15-83ce4dd9d890"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.709492 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "90a5f03e-7c3b-4264-ae15-83ce4dd9d890" (UID: "90a5f03e-7c3b-4264-ae15-83ce4dd9d890"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.712242 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm" (OuterVolumeSpecName: "kube-api-access-tdcqm") pod "90a5f03e-7c3b-4264-ae15-83ce4dd9d890" (UID: "90a5f03e-7c3b-4264-ae15-83ce4dd9d890"). InnerVolumeSpecName "kube-api-access-tdcqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.776844 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.776878 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.776888 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdcqm\" (UniqueName: \"kubernetes.io/projected/90a5f03e-7c3b-4264-ae15-83ce4dd9d890-kube-api-access-tdcqm\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.957180 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" event={"ID":"90a5f03e-7c3b-4264-ae15-83ce4dd9d890","Type":"ContainerDied","Data":"862b02f9e853816bc333c7d968dd6a20e2b44eeb5985575ca16978b158557ac3"} Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.957422 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269" Oct 13 07:45:04 crc kubenswrapper[4664]: I1013 07:45:04.958166 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="862b02f9e853816bc333c7d968dd6a20e2b44eeb5985575ca16978b158557ac3" Oct 13 07:45:05 crc kubenswrapper[4664]: I1013 07:45:05.651421 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5"] Oct 13 07:45:05 crc kubenswrapper[4664]: I1013 07:45:05.661995 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338980-sgwn5"] Oct 13 07:45:07 crc kubenswrapper[4664]: I1013 07:45:07.062704 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dd88a20-51cc-4081-9d90-653d6867555d" path="/var/lib/kubelet/pods/1dd88a20-51cc-4081-9d90-653d6867555d/volumes" Oct 13 07:45:13 crc kubenswrapper[4664]: I1013 07:45:13.521816 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" probeResult="failure" output=< Oct 13 07:45:13 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:45:13 crc kubenswrapper[4664]: > Oct 13 07:45:23 crc kubenswrapper[4664]: I1013 07:45:23.497115 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" probeResult="failure" output=< Oct 13 07:45:23 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:45:23 crc kubenswrapper[4664]: > Oct 13 07:45:33 crc kubenswrapper[4664]: I1013 07:45:33.507286 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" probeResult="failure" output=< Oct 13 07:45:33 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:45:33 crc kubenswrapper[4664]: > Oct 13 07:45:40 crc 
kubenswrapper[4664]: I1013 07:45:40.626630 4664 scope.go:117] "RemoveContainer" containerID="8a88c93bb84ca02f583213f0a0adc3b4f1497639e8034a8529b58427db54e400" Oct 13 07:45:42 crc kubenswrapper[4664]: I1013 07:45:42.540318 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:45:42 crc kubenswrapper[4664]: I1013 07:45:42.596458 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:45:42 crc kubenswrapper[4664]: I1013 07:45:42.806529 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:45:44 crc kubenswrapper[4664]: I1013 07:45:44.322382 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dpvmf" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" containerID="cri-o://42618cd901a15851321267a2b2141dceb6f48ea479dc98e1f1cc507bd6789558" gracePeriod=2 Oct 13 07:45:45 crc kubenswrapper[4664]: I1013 07:45:45.361631 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerDied","Data":"42618cd901a15851321267a2b2141dceb6f48ea479dc98e1f1cc507bd6789558"} Oct 13 07:45:45 crc kubenswrapper[4664]: I1013 07:45:45.361858 4664 generic.go:334] "Generic (PLEG): container finished" podID="a6a06062-3ef8-4278-a633-f1158d79a843" containerID="42618cd901a15851321267a2b2141dceb6f48ea479dc98e1f1cc507bd6789558" exitCode=0 Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.096885 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.232686 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities\") pod \"a6a06062-3ef8-4278-a633-f1158d79a843\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.232912 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content\") pod \"a6a06062-3ef8-4278-a633-f1158d79a843\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.233143 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-699wb\" (UniqueName: \"kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb\") pod \"a6a06062-3ef8-4278-a633-f1158d79a843\" (UID: \"a6a06062-3ef8-4278-a633-f1158d79a843\") " Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.242360 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities" (OuterVolumeSpecName: "utilities") pod "a6a06062-3ef8-4278-a633-f1158d79a843" (UID: "a6a06062-3ef8-4278-a633-f1158d79a843"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.292223 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb" (OuterVolumeSpecName: "kube-api-access-699wb") pod "a6a06062-3ef8-4278-a633-f1158d79a843" (UID: "a6a06062-3ef8-4278-a633-f1158d79a843"). InnerVolumeSpecName "kube-api-access-699wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.335958 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-699wb\" (UniqueName: \"kubernetes.io/projected/a6a06062-3ef8-4278-a633-f1158d79a843-kube-api-access-699wb\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.335990 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.383373 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dpvmf" event={"ID":"a6a06062-3ef8-4278-a633-f1158d79a843","Type":"ContainerDied","Data":"a2291c2cf8bb3c260be568d19b5a5ac4ba1e4e59336cd13fc2003730078dfa9f"} Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.383702 4664 scope.go:117] "RemoveContainer" containerID="42618cd901a15851321267a2b2141dceb6f48ea479dc98e1f1cc507bd6789558" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.383866 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dpvmf" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.429217 4664 scope.go:117] "RemoveContainer" containerID="bd5752a7c269c796e0b27e3067421aa92e205eedaf6bc7c1b8d445ed1b99e9d7" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.481639 4664 scope.go:117] "RemoveContainer" containerID="ab318a3e3bf5ad05bc492bc77a4ac94dbbe8fac2a1c0c654623637eea2a4789b" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.518958 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6a06062-3ef8-4278-a633-f1158d79a843" (UID: "a6a06062-3ef8-4278-a633-f1158d79a843"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.539422 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6a06062-3ef8-4278-a633-f1158d79a843-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.744704 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:45:47 crc kubenswrapper[4664]: I1013 07:45:47.753952 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dpvmf"] Oct 13 07:45:49 crc kubenswrapper[4664]: I1013 07:45:49.060907 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" path="/var/lib/kubelet/pods/a6a06062-3ef8-4278-a633-f1158d79a843/volumes" Oct 13 07:45:58 crc kubenswrapper[4664]: I1013 07:45:58.811619 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:45:58 crc kubenswrapper[4664]: I1013 07:45:58.812945 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:46:28 crc kubenswrapper[4664]: I1013 07:46:28.811984 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:46:28 crc kubenswrapper[4664]: I1013 07:46:28.812565 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:46:58 crc kubenswrapper[4664]: I1013 07:46:58.813638 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:46:58 crc kubenswrapper[4664]: I1013 07:46:58.815725 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:46:58 crc kubenswrapper[4664]: I1013 07:46:58.817638 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:46:58 crc kubenswrapper[4664]: I1013 07:46:58.820854 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:46:58 crc kubenswrapper[4664]: I1013 07:46:58.821236 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad" gracePeriod=600 Oct 13 07:46:59 crc kubenswrapper[4664]: I1013 07:46:59.066899 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad"} Oct 13 07:46:59 crc kubenswrapper[4664]: I1013 07:46:59.066707 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad" exitCode=0 Oct 13 07:46:59 crc kubenswrapper[4664]: I1013 07:46:59.070239 4664 scope.go:117] "RemoveContainer" containerID="7959ff26c99e104e48ef2c3d94c2598f0c3723201ef5084fd9c2dc3b24191b19" Oct 13 07:47:00 crc kubenswrapper[4664]: I1013 07:47:00.087081 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3"} Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.364379 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:49:14 crc kubenswrapper[4664]: E1013 07:49:14.373311 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90a5f03e-7c3b-4264-ae15-83ce4dd9d890" containerName="collect-profiles" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.373723 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a5f03e-7c3b-4264-ae15-83ce4dd9d890" containerName="collect-profiles" Oct 13 07:49:14 crc kubenswrapper[4664]: E1013 07:49:14.373769 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="extract-content" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.373778 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="extract-content" Oct 13 07:49:14 crc kubenswrapper[4664]: E1013 07:49:14.373879 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="extract-utilities" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.373891 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="extract-utilities" Oct 13 07:49:14 crc kubenswrapper[4664]: E1013 07:49:14.373912 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.373921 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" Oct 13 07:49:14 crc 
kubenswrapper[4664]: I1013 07:49:14.376040 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6a06062-3ef8-4278-a633-f1158d79a843" containerName="registry-server" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.376080 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="90a5f03e-7c3b-4264-ae15-83ce4dd9d890" containerName="collect-profiles" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.387630 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.520826 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.520900 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-847x5\" (UniqueName: \"kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.521094 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.623438 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.623747 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-847x5\" (UniqueName: \"kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.623876 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.634488 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.635877 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.672435 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-847x5\" (UniqueName: \"kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5\") pod \"community-operators-q7fwd\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.738005 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:49:14 crc kubenswrapper[4664]: I1013 07:49:14.769351 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:16 crc kubenswrapper[4664]: I1013 07:49:16.586130 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:49:17 crc kubenswrapper[4664]: I1013 07:49:17.404734 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerDied","Data":"04bcdfb9cb4f8a68cb63628142a581a42254789cca372a124b61fac3eb26493d"} Oct 13 07:49:17 crc kubenswrapper[4664]: I1013 07:49:17.405680 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerID="04bcdfb9cb4f8a68cb63628142a581a42254789cca372a124b61fac3eb26493d" exitCode=0 Oct 13 07:49:17 crc kubenswrapper[4664]: I1013 07:49:17.406629 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerStarted","Data":"13b3ff29c3dd4c2e044eadf57d96ae9413c17237e3146965a3b18083cdbd91ec"} Oct 13 07:49:17 crc kubenswrapper[4664]: I1013 07:49:17.420062 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 07:49:19 crc kubenswrapper[4664]: I1013 07:49:19.431546 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerStarted","Data":"d5fb3bd0fb68520c3ce5cd82442b2856b6bf2ddfdd4468e2701f8ab7dade1919"} Oct 13 07:49:22 crc kubenswrapper[4664]: I1013 07:49:22.463168 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerDied","Data":"d5fb3bd0fb68520c3ce5cd82442b2856b6bf2ddfdd4468e2701f8ab7dade1919"} Oct 13 07:49:22 crc kubenswrapper[4664]: I1013 07:49:22.463645 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerID="d5fb3bd0fb68520c3ce5cd82442b2856b6bf2ddfdd4468e2701f8ab7dade1919" exitCode=0 Oct 13 07:49:24 crc kubenswrapper[4664]: I1013 07:49:24.483648 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerStarted","Data":"32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41"} Oct 13 07:49:24 crc kubenswrapper[4664]: I1013 07:49:24.504308 4664 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q7fwd" podStartSLOduration=4.685523877 podStartE2EDuration="10.501056042s" podCreationTimestamp="2025-10-13 07:49:14 +0000 UTC" firstStartedPulling="2025-10-13 07:49:17.408774171 +0000 UTC m=+3765.096219363" lastFinishedPulling="2025-10-13 07:49:23.224306336 +0000 UTC m=+3770.911751528" observedRunningTime="2025-10-13 07:49:24.500510707 +0000 UTC m=+3772.187955899" watchObservedRunningTime="2025-10-13 07:49:24.501056042 +0000 UTC m=+3772.188501234" Oct 13 07:49:24 crc kubenswrapper[4664]: I1013 07:49:24.773300 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:24 crc kubenswrapper[4664]: I1013 07:49:24.773779 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:49:25 crc kubenswrapper[4664]: I1013 07:49:25.834726 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:25 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:25 crc kubenswrapper[4664]: > Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.551890 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.568378 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.669081 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qst7k\" (UniqueName: \"kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.669427 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.669444 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.771786 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qst7k\" (UniqueName: \"kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.771849 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.771870 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.780031 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.780670 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.814185 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.812016 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.814442 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.822813 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qst7k\" (UniqueName: \"kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k\") pod \"certified-operators-dvt4k\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:28 crc kubenswrapper[4664]: I1013 07:49:28.917492 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:30 crc kubenswrapper[4664]: I1013 07:49:30.941038 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:49:31 crc kubenswrapper[4664]: I1013 07:49:31.584344 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerDied","Data":"b30feba35d4ea9cf6ef086a997c511d7bbcffd327939bd1d6b1d8e18466c1296"} Oct 13 07:49:31 crc kubenswrapper[4664]: I1013 07:49:31.586150 4664 generic.go:334] "Generic (PLEG): container finished" podID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerID="b30feba35d4ea9cf6ef086a997c511d7bbcffd327939bd1d6b1d8e18466c1296" exitCode=0 Oct 13 07:49:31 crc kubenswrapper[4664]: I1013 07:49:31.586324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerStarted","Data":"b90487450fca13daebc65d525c805c253c734b933c2e23f3164bf22b9a01abd0"} Oct 13 07:49:33 crc kubenswrapper[4664]: I1013 07:49:33.664363 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerStarted","Data":"752fd808130327cbd83c230cec49efdc23f071a011fd6644c7cfba334957aa46"} Oct 13 07:49:35 crc kubenswrapper[4664]: I1013 07:49:35.827181 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:35 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:35 crc kubenswrapper[4664]: > Oct 13 07:49:36 crc kubenswrapper[4664]: I1013 07:49:36.692650 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerDied","Data":"752fd808130327cbd83c230cec49efdc23f071a011fd6644c7cfba334957aa46"} Oct 13 07:49:36 crc kubenswrapper[4664]: I1013 07:49:36.694612 4664 generic.go:334] "Generic (PLEG): container finished" podID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerID="752fd808130327cbd83c230cec49efdc23f071a011fd6644c7cfba334957aa46" exitCode=0 Oct 13 07:49:38 crc kubenswrapper[4664]: I1013 07:49:38.715721 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerStarted","Data":"78d9f1e1f569a5fff0e7ed94d9b1af55ab3a054b299cc5beea5c968045323a25"} Oct 13 07:49:38 crc kubenswrapper[4664]: I1013 07:49:38.757584 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dvt4k" podStartSLOduration=4.763450113 podStartE2EDuration="10.7540914s" podCreationTimestamp="2025-10-13 07:49:28 +0000 UTC" firstStartedPulling="2025-10-13 07:49:31.587384672 +0000 UTC m=+3779.274829874" lastFinishedPulling="2025-10-13 07:49:37.578025969 +0000 UTC m=+3785.265471161" observedRunningTime="2025-10-13 07:49:38.744974613 +0000 UTC m=+3786.432419825" watchObservedRunningTime="2025-10-13 07:49:38.7540914 +0000 UTC m=+3786.441536592" Oct 13 07:49:38 crc kubenswrapper[4664]: I1013 07:49:38.918284 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:38 crc kubenswrapper[4664]: I1013 07:49:38.918339 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:49:40 crc kubenswrapper[4664]: I1013 07:49:40.054860 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:40 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:40 crc kubenswrapper[4664]: > Oct 13 07:49:45 crc kubenswrapper[4664]: I1013 07:49:45.859095 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:45 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:45 crc kubenswrapper[4664]: > Oct 13 07:49:50 crc kubenswrapper[4664]: I1013 07:49:50.147584 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:50 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:50 crc kubenswrapper[4664]: > Oct 13 07:49:55 crc kubenswrapper[4664]: I1013 07:49:55.831292 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" probeResult="failure" output=< Oct 13 07:49:55 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:49:55 crc kubenswrapper[4664]: > Oct 13 07:49:58 crc kubenswrapper[4664]: I1013 07:49:58.811832 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:49:58 crc kubenswrapper[4664]: I1013 07:49:58.816764 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:50:00 crc kubenswrapper[4664]: I1013 07:50:00.091383 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:00 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:00 crc kubenswrapper[4664]: > Oct 13 07:50:04 crc kubenswrapper[4664]: I1013 07:50:04.948294 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:50:05 crc kubenswrapper[4664]: I1013 07:50:05.039299 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.193281 4664 
patch_prober.go:28] interesting pod/authentication-operator-69f744f599-gkm66 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.197502 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podUID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.655598 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.656000 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.655661 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.656094 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.737664 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:50:06 crc kubenswrapper[4664]: I1013 07:50:06.752842 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" containerID="cri-o://32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" gracePeriod=2 Oct 13 07:50:07 crc kubenswrapper[4664]: I1013 07:50:07.540210 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:07 crc kubenswrapper[4664]: I1013 07:50:07.540202 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" 
containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:07 crc kubenswrapper[4664]: I1013 07:50:07.684026 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" podUID="b371ef2b-6ffd-4759-8a02-279705b4a4d3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.082048 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerDied","Data":"32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41"} Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.082871 4664 generic.go:334] "Generic (PLEG): container finished" podID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerID="32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" exitCode=0 Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.314998 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.315055 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.356007 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.447101 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.448887 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.447113 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe 
status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.449208 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.749057 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podUID="6647aa13-3608-4eeb-87b7-26741b9c2a6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:08 crc kubenswrapper[4664]: I1013 07:50:08.789949 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.525162 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:09 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:09 crc kubenswrapper[4664]: > Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.525398 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:09 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:09 crc kubenswrapper[4664]: > Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.783387 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.784836 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.924019 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:09 crc kubenswrapper[4664]: I1013 07:50:09.924006 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting 
headers)" Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.241082 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.241147 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.809334 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded" start-of-body= Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.809351 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.811153 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded" Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.811173 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:10 crc kubenswrapper[4664]: I1013 07:50:10.923324 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:10 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:10 crc kubenswrapper[4664]: > Oct 13 07:50:11 crc kubenswrapper[4664]: I1013 07:50:11.466837 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:11 crc kubenswrapper[4664]: I1013 07:50:11.466841 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 
07:50:11 crc kubenswrapper[4664]: I1013 07:50:11.467443 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:11 crc kubenswrapper[4664]: I1013 07:50:11.467486 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:14 crc kubenswrapper[4664]: E1013 07:50:14.780890 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41 is running failed: container process not found" containerID="32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" cmd=["grpc_health_probe","-addr=:50051"] Oct 13 07:50:14 crc kubenswrapper[4664]: E1013 07:50:14.785070 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41 is running failed: container process not found" containerID="32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" cmd=["grpc_health_probe","-addr=:50051"] Oct 13 07:50:14 crc kubenswrapper[4664]: E1013 07:50:14.785335 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41 is running failed: container process not found" containerID="32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" cmd=["grpc_health_probe","-addr=:50051"] Oct 13 07:50:14 crc kubenswrapper[4664]: E1013 07:50:14.790345 4664 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-q7fwd" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.361054 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.426529 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities\") pod \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.426624 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-847x5\" (UniqueName: \"kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5\") pod \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.426670 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content\") pod \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\" (UID: \"2f0168ad-aea2-43fe-b228-1ed1bff808dc\") " Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.430016 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities" (OuterVolumeSpecName: "utilities") pod "2f0168ad-aea2-43fe-b228-1ed1bff808dc" (UID: "2f0168ad-aea2-43fe-b228-1ed1bff808dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.459460 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5" (OuterVolumeSpecName: "kube-api-access-847x5") pod "2f0168ad-aea2-43fe-b228-1ed1bff808dc" (UID: "2f0168ad-aea2-43fe-b228-1ed1bff808dc"). InnerVolumeSpecName "kube-api-access-847x5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.529399 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-847x5\" (UniqueName: \"kubernetes.io/projected/2f0168ad-aea2-43fe-b228-1ed1bff808dc-kube-api-access-847x5\") on node \"crc\" DevicePath \"\"" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.529448 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.778943 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f0168ad-aea2-43fe-b228-1ed1bff808dc" (UID: "2f0168ad-aea2-43fe-b228-1ed1bff808dc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:50:17 crc kubenswrapper[4664]: I1013 07:50:17.835272 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f0168ad-aea2-43fe-b228-1ed1bff808dc-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.232598 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7fwd" event={"ID":"2f0168ad-aea2-43fe-b228-1ed1bff808dc","Type":"ContainerDied","Data":"13b3ff29c3dd4c2e044eadf57d96ae9413c17237e3146965a3b18083cdbd91ec"} Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.236276 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7fwd" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.238214 4664 scope.go:117] "RemoveContainer" containerID="32ada8533a6cd647a6390464ff0991985b8842ec73523b60643f6ffd05067e41" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.454995 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.454998 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.460671 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.460666 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.487985 4664 scope.go:117] "RemoveContainer" containerID="d5fb3bd0fb68520c3ce5cd82442b2856b6bf2ddfdd4468e2701f8ab7dade1919" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.574901 4664 scope.go:117] "RemoveContainer" containerID="04bcdfb9cb4f8a68cb63628142a581a42254789cca372a124b61fac3eb26493d" Oct 13 07:50:18 crc kubenswrapper[4664]: I1013 07:50:18.831022 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:19 crc 
kubenswrapper[4664]: I1013 07:50:19.830624 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:19 crc kubenswrapper[4664]: I1013 07:50:19.830587 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:19 crc kubenswrapper[4664]: I1013 07:50:19.940897 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:19 crc kubenswrapper[4664]: I1013 07:50:19.940917 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.224294 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.224475 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.811489 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.811485 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded" start-of-body= Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.829737 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:20 crc kubenswrapper[4664]: I1013 07:50:20.829831 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" 
probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded" Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.152642 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:21 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:21 crc kubenswrapper[4664]: > Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.383427 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.383847 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.392716 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.392782 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.601003 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" podUID="55e9e787-3f8b-4a88-8693-7e0b265b4724" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.642326 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:21 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:21 crc kubenswrapper[4664]: > Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.646710 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:21 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:21 crc kubenswrapper[4664]: > Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.784180 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" 
podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:21 crc kubenswrapper[4664]: I1013 07:50:21.797393 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:22 crc kubenswrapper[4664]: I1013 07:50:22.542115 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:50:22 crc kubenswrapper[4664]: I1013 07:50:22.638826 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q7fwd"] Oct 13 07:50:23 crc kubenswrapper[4664]: I1013 07:50:23.088743 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" path="/var/lib/kubelet/pods/2f0168ad-aea2-43fe-b228-1ed1bff808dc/volumes" Oct 13 07:50:23 crc kubenswrapper[4664]: I1013 07:50:23.846619 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:23 crc kubenswrapper[4664]: I1013 07:50:23.861135 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:23 crc kubenswrapper[4664]: I1013 07:50:23.866653 4664 patch_prober.go:28] interesting pod/nmstate-webhook-6cdbc54649-kwpkw container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:23 crc kubenswrapper[4664]: I1013 07:50:23.866695 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" podUID="d5f7b13a-10cb-44b2-89b1-e434b8f81923" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.218308 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.585528 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.585528 4664 patch_prober.go:28] 
interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.587700 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.587766 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.630326 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-bbwgq" podUID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:25 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:25 crc kubenswrapper[4664]: > Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.630445 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-bbwgq" podUID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:25 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:25 crc kubenswrapper[4664]: > Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.630326 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:25 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:25 crc kubenswrapper[4664]: > Oct 13 07:50:25 crc kubenswrapper[4664]: I1013 07:50:25.631106 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:25 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:25 crc kubenswrapper[4664]: > Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.147350 4664 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-gkm66 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.147442 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podUID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" 
containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.578825 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.578884 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.579115 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.579148 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.579310 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.579327 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.632943 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.633004 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674044 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get 
\"https://10.217.0.18:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674102 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674164 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674116 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674185 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674204 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674565 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674593 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674636 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 
07:50:26 crc kubenswrapper[4664]: I1013 07:50:26.674656 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.121482 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-85599f4f6-7xvrk" podUID="c5b636b9-85bf-4ebc-8fea-04f2bc895d6a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.47:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.217305 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" podUID="deda5c15-ffb4-44e6-9e27-465106737111" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.75:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.466850 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-m4bpt" podUID="9f6b6a3e-5706-49fa-aafa-49f68b19997e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.73:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.549057 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.549071 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.672100 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" podUID="bd3c97ad-17c7-47d7-ae5e-1a67c489c142" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.77:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.672478 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.672139 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" 
podUID="b28d60cb-ff14-4b64-b7b0-3af252c60311" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.749203 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" podUID="de55f4c7-2413-4a00-8691-a9545525fc88" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.749751 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" podUID="b371ef2b-6ffd-4759-8a02-279705b4a4d3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.749697 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.749619 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.751563 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.751565 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.790295 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Oct 13 07:50:27 crc kubenswrapper[4664]: I1013 07:50:27.972027 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.082115 4664 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.318510 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.318526 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.359251 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.446915 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.446995 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.447012 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.447064 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.448369 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" 
Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.451129 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="route-controller-manager" containerStatusID={"Type":"cri-o","ID":"86ad2da63cb4bb3a19f24720fca38fae04d7c18ad2205409a68baa72dd2fa28c"} pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" containerMessage="Container route-controller-manager failed liveness probe, will be restarted" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.451852 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" containerID="cri-o://86ad2da63cb4bb3a19f24720fca38fae04d7c18ad2205409a68baa72dd2fa28c" gracePeriod=30 Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.749614 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podUID="6647aa13-3608-4eeb-87b7-26741b9c2a6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.912041 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.912142 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.912143 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.912232 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.920841 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.921230 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.921286 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:50:28 crc 
kubenswrapper[4664]: I1013 07:50:28.922614 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr" containerStatusID={"Type":"cri-o","ID":"afaacd9d78172fd3db8975ce43ec5762bb73e1075885515b31c0dbc560938ad9"} pod="metallb-system/frr-k8s-7pjp7" containerMessage="Container frr failed liveness probe, will be restarted" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.922815 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" containerID="cri-o://afaacd9d78172fd3db8975ce43ec5762bb73e1075885515b31c0dbc560938ad9" gracePeriod=2 Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.923167 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:50:28 crc kubenswrapper[4664]: I1013 07:50:28.923234 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" gracePeriod=600 Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.439016 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rhmxl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.439078 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podUID="acb034b5-2645-458a-91ae-14c42b6632b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.439016 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rhmxl container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.439131 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podUID="acb034b5-2645-458a-91ae-14c42b6632b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.531900 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:29 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:29 crc kubenswrapper[4664]: > Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 
07:50:29.531896 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:29 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:29 crc kubenswrapper[4664]: > Oct 13 07:50:29 crc kubenswrapper[4664]: E1013 07:50:29.681566 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.686764 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3"} Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.687453 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" exitCode=0 Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.693250 4664 scope.go:117] "RemoveContainer" containerID="ee5925000477c21de7da6a1d5e4433bf54c310755c11f6e082f8a63ffd438dad" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.696490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerDied","Data":"afaacd9d78172fd3db8975ce43ec5762bb73e1075885515b31c0dbc560938ad9"} Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.696405 4664 generic.go:334] "Generic (PLEG): container finished" podID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerID="afaacd9d78172fd3db8975ce43ec5762bb73e1075885515b31c0dbc560938ad9" exitCode=143 Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.784070 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.784244 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.784258 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.784323 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.787227 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42"} pod="openstack/openstack-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.924979 4664 prober.go:107] "Probe failed" 
probeType="Readiness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.925129 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-sngcf" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.925635 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.925677 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/speaker-sngcf" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.927271 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="speaker" containerStatusID={"Type":"cri-o","ID":"58fa53e01c21928fa21c2ac0c8b9cb13b72feb68c5a50198f9377fd2c0bf6f37"} pod="metallb-system/speaker-sngcf" containerMessage="Container speaker failed liveness probe, will be restarted" Oct 13 07:50:29 crc kubenswrapper[4664]: I1013 07:50:29.927344 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" containerID="cri-o://58fa53e01c21928fa21c2ac0c8b9cb13b72feb68c5a50198f9377fd2c0bf6f37" gracePeriod=2 Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.216308 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-sngcf" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.222822 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:30 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:30 crc kubenswrapper[4664]: > Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.525080 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:30 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:30 crc kubenswrapper[4664]: > Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.532702 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:30 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:30 crc kubenswrapper[4664]: > Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.686421 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:30 crc 
kubenswrapper[4664]: I1013 07:50:30.687847 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.686404 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.687897 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.783246 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.783928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"53a0526c87d580584ce17563c9600dc6a31833763296a28fc6564d7d9216d1f9"} Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.789244 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:50:30 crc kubenswrapper[4664]: E1013 07:50:30.790444 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.817950 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.817963 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.818204 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" 
containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.818246 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.818313 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.819785 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller-manager" containerStatusID={"Type":"cri-o","ID":"1a77fb8fc8046104fd1287258b7eb0cbc1809f9fb244d1b9663617926015678f"} pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" containerMessage="Container controller-manager failed liveness probe, will be restarted" Oct 13 07:50:30 crc kubenswrapper[4664]: I1013 07:50:30.819951 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" containerID="cri-o://1a77fb8fc8046104fd1287258b7eb0cbc1809f9fb244d1b9663617926015678f" gracePeriod=30 Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382365 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382415 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382429 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382460 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382515 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.382539 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.383268 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="oauth-openshift" containerStatusID={"Type":"cri-o","ID":"f91e5cd8608543258f8cfb67e529651e03a24e1a5451ea212f0171b0310c68d8"} pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" containerMessage="Container oauth-openshift failed liveness probe, will be restarted" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.512715 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" podUID="55e9e787-3f8b-4a88-8693-7e0b265b4724" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.785149 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.786357 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.803709 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sngcf" event={"ID":"54a84985-f353-4f50-aec1-d7b5501c1d2c","Type":"ContainerDied","Data":"58fa53e01c21928fa21c2ac0c8b9cb13b72feb68c5a50198f9377fd2c0bf6f37"} Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.808901 4664 generic.go:334] "Generic (PLEG): container finished" podID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerID="58fa53e01c21928fa21c2ac0c8b9cb13b72feb68c5a50198f9377fd2c0bf6f37" exitCode=0 Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.823059 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:31 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:31 crc kubenswrapper[4664]: > Oct 13 07:50:31 crc kubenswrapper[4664]: I1013 07:50:31.827019 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:31 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:31 crc kubenswrapper[4664]: > Oct 13 07:50:32 crc kubenswrapper[4664]: I1013 07:50:32.429296 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:32 crc kubenswrapper[4664]: I1013 07:50:32.437747 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" 
containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:32 crc kubenswrapper[4664]: I1013 07:50:32.746669 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="43ff3dc8-8a2c-4051-813b-69406ed7359e" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.212:8080/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:32 crc kubenswrapper[4664]: I1013 07:50:32.789698 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:50:32 crc kubenswrapper[4664]: I1013 07:50:32.791406 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.043074 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.640782 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.640836 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.641077 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.641144 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.641212 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.641302 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.644274 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-config-operator" 
containerStatusID={"Type":"cri-o","ID":"36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9"} pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.644637 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" containerID="cri-o://36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9" gracePeriod=30 Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.789710 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.789786 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.909064 4664 patch_prober.go:28] interesting pod/nmstate-webhook-6cdbc54649-kwpkw container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:33 crc kubenswrapper[4664]: I1013 07:50:33.909127 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" podUID="d5f7b13a-10cb-44b2-89b1-e434b8f81923" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.280085 4664 patch_prober.go:28] interesting pod/console-54c9bfc7b6-rvkt8 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.45:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.280386 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-54c9bfc7b6-rvkt8" podUID="e9fa90fd-fb25-4656-b2ee-e4788866cf6d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.45:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.683972 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.684040 4664 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.862778 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-sngcf" event={"ID":"54a84985-f353-4f50-aec1-d7b5501c1d2c","Type":"ContainerStarted","Data":"f47273ec7019f21835a37cfc509085dd4fc3452fb282b8d9b179dcf3fd506ad7"} Oct 13 07:50:34 crc kubenswrapper[4664]: I1013 07:50:34.862862 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-sngcf" Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.667084 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.667378 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.667423 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.667446 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.668892 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.668926 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.872358 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" event={"ID":"5c9254a8-804c-462f-b06a-0016170cb46c","Type":"ContainerDied","Data":"1a77fb8fc8046104fd1287258b7eb0cbc1809f9fb244d1b9663617926015678f"} Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.873610 4664 
generic.go:334] "Generic (PLEG): container finished" podID="5c9254a8-804c-462f-b06a-0016170cb46c" containerID="1a77fb8fc8046104fd1287258b7eb0cbc1809f9fb244d1b9663617926015678f" exitCode=0 Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.876575 4664 generic.go:334] "Generic (PLEG): container finished" podID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerID="86ad2da63cb4bb3a19f24720fca38fae04d7c18ad2205409a68baa72dd2fa28c" exitCode=0 Oct 13 07:50:35 crc kubenswrapper[4664]: I1013 07:50:35.877108 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" event={"ID":"5c44ac49-ba1b-4708-bc11-d98ae6be1973","Type":"ContainerDied","Data":"86ad2da63cb4bb3a19f24720fca38fae04d7c18ad2205409a68baa72dd2fa28c"} Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.188080 4664 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-gkm66 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.190505 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podUID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.440220 4664 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-mb6jf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.440304 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" podUID="8051931c-c553-49fb-82bc-f584e6a34ff2" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.440479 4664 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-mb6jf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.440498 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" podUID="8051931c-c553-49fb-82bc-f584e6a34ff2" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578062 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": 
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578474 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578192 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578557 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578659 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.578715 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.591224 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": context deadline exceeded" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.591331 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": context deadline exceeded"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.592093 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.592130 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.700975 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701028 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701260 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701308 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701287 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701325 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701528 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.701580 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.889977 4664 generic.go:334] "Generic (PLEG): container finished" podID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerID="36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9" exitCode=0
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.890054 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerDied","Data":"36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9"}
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.893512 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" event={"ID":"5c9254a8-804c-462f-b06a-0016170cb46c","Type":"ContainerStarted","Data":"5ddb69d9afbc9be6822bca999b8779781d3b9c143b0eaf6afb1ad546ba9bbecb"}
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.893720 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.894063 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.894119 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.896375 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" event={"ID":"5c44ac49-ba1b-4708-bc11-d98ae6be1973","Type":"ContainerStarted","Data":"2841f1a84f8d6a59b27f7183e884be961239b0681146edc9ce382df25eb51018"}
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.896633 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9"
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.896964 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" start-of-body=
Oct 13 07:50:36 crc kubenswrapper[4664]: I1013 07:50:36.897004 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused"
Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.425628 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s
Oct 13 07:50:37 crc kubenswrapper[4664]: >
Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.425945 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s
Oct 13 07:50:37 crc kubenswrapper[4664]: >
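Note the different failure classes interleaved above: "context deadline exceeded" and "Client.Timeout exceeded" are timeouts against a live endpoint, while "dial tcp ...: connect: connection refused", appearing immediately after a ContainerStarted event, means the TCP connection itself was rejected because the restarted process was not yet listening. A small Go sketch that isolates the dial-level failure, using a probe address from the log (it will only behave this way on that node):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// A refused dial fails fast; a timed-out dial consumes the full
	// deadline. This distinguishes "nothing listening yet" from
	// "listening but too slow", the two patterns mixed in the log.
	conn, err := net.DialTimeout("tcp", "10.217.0.52:8443", 1*time.Second)
	if err != nil {
		fmt.Println("dial failed:", err) // e.g. "connect: connection refused"
		return
	}
	conn.Close()
	fmt.Println("listener is up")
}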
pod="openshift-marketplace/community-operators-5hrqb" podUID="3960f54e-4fe8-4198-adf1-3aea88880c0f" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:37 crc kubenswrapper[4664]: > Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.425943 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-bbwgq" podUID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:37 crc kubenswrapper[4664]: > Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.428473 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-bbwgq" podUID="eef97848-b083-4ac5-a9bd-5b8f047b420b" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:37 crc kubenswrapper[4664]: > Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.466071 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" podUID="bf55d8b4-9315-48b2-962c-318911833b6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.466262 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" podUID="bf55d8b4-9315-48b2-962c-318911833b6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.528692 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:37 crc kubenswrapper[4664]: > Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.543396 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-pt8xd" podUID="ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:37 crc kubenswrapper[4664]: timeout: health rpc did not complete within 1s Oct 13 07:50:37 crc kubenswrapper[4664]: > Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.550043 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" podUID="3c368f1e-93f0-440a-ad95-d205dd78e4b2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.74:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.633501 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" podUID="3c368f1e-93f0-440a-ad95-d205dd78e4b2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.74:8081/healthz\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.633501 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.633779 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" start-of-body= Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.633805 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.797970 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.797990 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" podUID="b28d60cb-ff14-4b64-b7b0-3af252c60311" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.922060 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerStarted","Data":"f2dd0ed45f74641ce27464256a14b28dd8c445359a85dab6676f5af42ba26336"} Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.922418 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964063 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" podUID="de55f4c7-2413-4a00-8691-a9545525fc88" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964077 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964412 4664 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-5df598886f-mcst5" podUID="b28d60cb-ff14-4b64-b7b0-3af252c60311" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964676 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" podUID="ccadedcb-9722-4c86-9b22-17d4f9ce1cd7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.86:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964828 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-nn6ql" podUID="ccadedcb-9722-4c86-9b22-17d4f9ce1cd7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.86:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964879 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" podUID="de55f4c7-2413-4a00-8691-a9545525fc88" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.965035 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" start-of-body= Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.965107 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" Oct 13 07:50:37 crc kubenswrapper[4664]: I1013 07:50:37.964193 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-s6ftn" podUID="5daf4cb7-d305-4408-97d8-9645cd4e61d5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.051979 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" start-of-body= Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.052034 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" Oct 13 07:50:38 
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.052099 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.138538 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.138567 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.139570 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-798km" podUID="3ae920d7-c605-4984-a072-dad04b3cc6cc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.313997 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.314086 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.396082 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.479167 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-68d546b9d8-fhr4v" podUID="84ac3e13-afa3-4136-ba43-738b66c8e84e" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.50:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.479813 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.480044 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/controller-68d546b9d8-fhr4v" podUID="84ac3e13-afa3-4136-ba43-738b66c8e84e" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.50:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.748683 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podUID="6647aa13-3608-4eeb-87b7-26741b9c2a6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.789552 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.789700 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.794784 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.800256 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" containerID="cri-o://7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb" gracePeriod=30
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.915031 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podUID="6647aa13-3608-4eeb-87b7-26741b9c2a6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.915135 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.915184 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:38 crc kubenswrapper[4664]: I1013 07:50:38.915219 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:39 crc kubenswrapper[4664]: I1013 07:50:39.783415 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" probeResult="failure" output="command timed out"
Oct 13 07:50:39 crc kubenswrapper[4664]: I1013 07:50:39.805609 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" start-of-body=
Oct 13 07:50:39 crc kubenswrapper[4664]: I1013 07:50:39.805673 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused"
Oct 13 07:50:39 crc kubenswrapper[4664]: I1013 07:50:39.959372 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" containerID="cri-o://507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42" gracePeriod=20
Oct 13 07:50:40 crc kubenswrapper[4664]: I1013 07:50:40.240581 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:40 crc kubenswrapper[4664]: I1013 07:50:40.240618 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-5655c58dd6-5p4jw" podUID="4f2dd003-654f-4e3d-9fb9-cbea80c68acd" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.65:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:40 crc kubenswrapper[4664]: I1013 07:50:40.521928 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm"
Oct 13 07:50:40 crc kubenswrapper[4664]: I1013 07:50:40.786292 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-operator-688d597459-5n8bw" podUID="844ed68c-a79c-4751-98b2-d0459d583d06" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.68:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.524194 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:50:41 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:50:41 crc kubenswrapper[4664]: >
Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.526027 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:50:41 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:50:41 crc kubenswrapper[4664]: >
pod="openstack-operators/openstack-operator-index-bsghg" podUID="82047d93-7f79-495b-9e1a-380994104bb0" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:41 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:41 crc kubenswrapper[4664]: > Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.641125 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.643504 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.641128 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.643573 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.784523 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.784523 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.784658 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.784692 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 13 07:50:41 crc kubenswrapper[4664]: I1013 07:50:41.791142 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"19489a9e2a956a6eac4a3a695039618d5f0e1b7dc771e7427d7391a00e2a6983"} pod="openstack/openstack-cell1-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.410082 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:42 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:42 crc 
kubenswrapper[4664]: > Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.411438 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:42 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:42 crc kubenswrapper[4664]: > Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.417329 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:42 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:42 crc kubenswrapper[4664]: > Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.417903 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.419404 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.747942 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="43ff3dc8-8a2c-4051-813b-69406ed7359e" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.212:8080/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.783670 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.813392 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-v4pkt" podUID="55e9e787-3f8b-4a88-8693-7e0b265b4724" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.972108 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"77f2d054cda76f67f0a1af0675e2ac31270a35a2e02034d1c4b70c08f9602154"} pod="openshift-marketplace/redhat-marketplace-5b7pw" containerMessage="Container registry-server failed liveness probe, will be restarted" Oct 13 07:50:42 crc kubenswrapper[4664]: I1013 07:50:42.972173 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" containerID="cri-o://77f2d054cda76f67f0a1af0675e2ac31270a35a2e02034d1c4b70c08f9602154" gracePeriod=30 Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.121265 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="fd97ddf9-5f06-4c91-af04-42c116fac89d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.791260 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get 
\"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.792603 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.792649 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.794266 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-scheduler" containerStatusID={"Type":"cri-o","ID":"31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4"} pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" containerMessage="Container kube-scheduler failed liveness probe, will be restarted" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.794357 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" containerID="cri-o://31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4" gracePeriod=30 Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.867320 4664 patch_prober.go:28] interesting pod/nmstate-webhook-6cdbc54649-kwpkw container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.867385 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" podUID="d5f7b13a-10cb-44b2-89b1-e434b8f81923" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.40:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.867477 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw" Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.985820 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb"} Oct 13 07:50:43 crc kubenswrapper[4664]: I1013 07:50:43.986863 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerID="7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb" exitCode=0 Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.132212 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:44 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:44 crc 
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.279633 4664 patch_prober.go:28] interesting pod/console-54c9bfc7b6-rvkt8 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.45:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.279699 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-54c9bfc7b6-rvkt8" podUID="e9fa90fd-fb25-4656-b2ee-e4788866cf6d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.45:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.324594 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-kwpkw"
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.640002 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.640256 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.640316 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.640343 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Oct 13 07:50:44 crc kubenswrapper[4664]: I1013 07:50:44.809259 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="fd97ddf9-5f06-4c91-af04-42c116fac89d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.058375 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3"
Oct 13 07:50:45 crc kubenswrapper[4664]: E1013 07:50:45.068061 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.578864 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" start-of-body=
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.579539 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.579770 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583116 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583147 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583189 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583150 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583272 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.583308 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console-operator/console-operator-58897d9998-6bz9t"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.584613 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="console-operator" containerStatusID={"Type":"cri-o","ID":"74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9"} pod="openshift-console-operator/console-operator-58897d9998-6bz9t" containerMessage="Container console-operator failed liveness probe, will be restarted"
Oct 13 07:50:45 crc kubenswrapper[4664]: I1013 07:50:45.584653 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" containerID="cri-o://74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9" gracePeriod=30
"Killing container with a grace period" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" containerID="cri-o://74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9" gracePeriod=30 Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.194588 4664 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-gkm66 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.194654 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podUID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.194989 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.196075 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="authentication-operator" containerStatusID={"Type":"cri-o","ID":"9e2011f46eb121206644b4823cd39cf24fb0aee818323f1623797e3a07f99827"} pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" containerMessage="Container authentication-operator failed liveness probe, will be restarted" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.196127 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" podUID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" containerName="authentication-operator" containerID="cri-o://9e2011f46eb121206644b4823cd39cf24fb0aee818323f1623797e3a07f99827" gracePeriod=30 Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.439623 4664 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-mb6jf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.439650 4664 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-mb6jf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.439681 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" podUID="8051931c-c553-49fb-82bc-f584e6a34ff2" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.440447 4664 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mb6jf" podUID="8051931c-c553-49fb-82bc-f584e6a34ff2" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.576965 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.577577 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.577671 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.577046 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.577775 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.577863 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.580982 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"6d583ad9075204a43d3edc2da689d253133491dc76b278ac8b780271c9ccb87e"} pod="openshift-ingress/router-default-5444994796-4bxj9" containerMessage="Container router failed liveness probe, will be restarted" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.581032 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" containerID="cri-o://6d583ad9075204a43d3edc2da689d253133491dc76b278ac8b780271c9ccb87e" gracePeriod=10 Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.623991 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624021 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator 
namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624042 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.623994 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624070 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624084 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624110 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624131 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624156 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624191 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624241 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for 
connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624308 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624480 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.624522 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.625451 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="olm-operator" containerStatusID={"Type":"cri-o","ID":"bb6e870c2f92cba2078cb02652c31251db1d82452046d4d3512a8383635cfa18"} pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" containerMessage="Container olm-operator failed liveness probe, will be restarted" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.625483 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" containerID="cri-o://bb6e870c2f92cba2078cb02652c31251db1d82452046d4d3512a8383635cfa18" gracePeriod=30 Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.627580 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="catalog-operator" containerStatusID={"Type":"cri-o","ID":"224ff3c0bee9d10f6f2ede628df0aaef3ac7b027a4622c3a3ff6e33ea61c70e0"} pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" containerMessage="Container catalog-operator failed liveness probe, will be restarted" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.627644 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" containerID="cri-o://224ff3c0bee9d10f6f2ede628df0aaef3ac7b027a4622c3a3ff6e33ea61c70e0" gracePeriod=30 Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655350 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655406 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655450 4664 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655459 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655536 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.655633 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.656583 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="packageserver" containerStatusID={"Type":"cri-o","ID":"8b9d7c6ede3c9f0b22bba018ae134acfb951e8df33df13a0eb3c845ac4cc0a2f"} pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" containerMessage="Container packageserver failed liveness probe, will be restarted" Oct 13 07:50:46 crc kubenswrapper[4664]: I1013 07:50:46.656631 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" containerID="cri-o://8b9d7c6ede3c9f0b22bba018ae134acfb951e8df33df13a0eb3c845ac4cc0a2f" gracePeriod=30 Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.057619 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerStarted","Data":"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e"} Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.217067 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-sl4lq" podUID="deda5c15-ffb4-44e6-9e27-465106737111" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.75:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.217913 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.270991 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-8hnvm" podUID="68c3d701-56d2-4bce-8c6e-e1894084fecf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.76:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.425356 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-f57sl" 
podUID="bf55d8b4-9315-48b2-962c-318911833b6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.475043 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-zptb2" podUID="3c368f1e-93f0-440a-ad95-d205dd78e4b2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.74:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.517090 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-6mlnb" podUID="5367fac7-2b9f-4745-b3cb-4accdf26ef59" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.82:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.640934 4664 patch_prober.go:28] interesting pod/route-controller-manager-794668dd4-b58l9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.640971 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-f2nxl" podUID="a76ae989-9e97-43ee-a38f-ebb30be19ab6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.83:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.640988 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" podUID="5c44ac49-ba1b-4708-bc11-d98ae6be1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.51:8443/healthz\": dial tcp 10.217.0.51:8443: connect: connection refused" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.723032 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-ncsms" podUID="bd3c97ad-17c7-47d7-ae5e-1a67c489c142" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.77:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.723120 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.723189 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764049 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764101 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764150 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764183 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764675 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764699 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="fd97ddf9-5f06-4c91-af04-42c116fac89d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764733 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.765084 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.765110 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.764705 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.766085 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"4a5f1ab1d2b02066b676a5f2f840925d5c727dd05eb8983b65bb0867c965af73"} pod="openstack/cinder-scheduler-0" containerMessage="Container 
cinder-scheduler failed liveness probe, will be restarted" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.766139 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="fd97ddf9-5f06-4c91-af04-42c116fac89d" containerName="cinder-scheduler" containerID="cri-o://4a5f1ab1d2b02066b676a5f2f840925d5c727dd05eb8983b65bb0867c965af73" gracePeriod=30 Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.846951 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-vxchx" podUID="91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.847326 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.846951 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" podUID="de55f4c7-2413-4a00-8691-a9545525fc88" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.847376 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.847431 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.847444 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-jfsd4" podUID="b371ef2b-6ffd-4759-8a02-279705b4a4d3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.847462 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.848866 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"f2dd0ed45f74641ce27464256a14b28dd8c445359a85dab6676f5af42ba26336"} pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.848920 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" 
podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" containerID="cri-o://f2dd0ed45f74641ce27464256a14b28dd8c445359a85dab6676f5af42ba26336" gracePeriod=30 Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.848930 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.848956 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.972022 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:47 crc kubenswrapper[4664]: I1013 07:50:47.972137 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.013248 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-fl4ff" podUID="86b8e5cb-18d9-4931-afb3-4f8dc9f788f0" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.054017 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-8mv7j" podUID="b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.065038 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" event={"ID":"6beaedb4-8ff1-4956-a04c-b4007e2d1c50","Type":"ContainerDied","Data":"224ff3c0bee9d10f6f2ede628df0aaef3ac7b027a4622c3a3ff6e33ea61c70e0"} Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.069323 4664 generic.go:334] "Generic (PLEG): container finished" podID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerID="224ff3c0bee9d10f6f2ede628df0aaef3ac7b027a4622c3a3ff6e33ea61c70e0" exitCode=0 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.095917 4664 generic.go:334] "Generic (PLEG): container finished" podID="41825a43-78e2-42f0-aec8-2778276d69d8" containerID="77f2d054cda76f67f0a1af0675e2ac31270a35a2e02034d1c4b70c08f9602154" exitCode=0 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.096027 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" 
event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerDied","Data":"77f2d054cda76f67f0a1af0675e2ac31270a35a2e02034d1c4b70c08f9602154"} Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.096045 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5458f77c4-7vhzw" podUID="f837553e-b572-4dcc-91b4-a8e6c2deb097" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.096143 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.128667 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-58897d9998-6bz9t_39908d1a-79af-485a-8deb-43f03552b3d1/console-operator/0.log" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.128782 4664 generic.go:334] "Generic (PLEG): container finished" podID="39908d1a-79af-485a-8deb-43f03552b3d1" containerID="74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9" exitCode=1 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.129126 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" event={"ID":"39908d1a-79af-485a-8deb-43f03552b3d1","Type":"ContainerDied","Data":"74037942c68cc1e918f1461b47dda60c6de3730b6cce924bddbdd0b097724fa9"} Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.129669 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="webhook-server" containerStatusID={"Type":"cri-o","ID":"855894c19e5600eaa4f4f973cdecb4cb7d71e9f941dd3c4ea61c15110e6757f5"} pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" containerMessage="Container webhook-server failed liveness probe, will be restarted" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.129720 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" podUID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerName="webhook-server" containerID="cri-o://855894c19e5600eaa4f4f973cdecb4cb7d71e9f941dd3c4ea61c15110e6757f5" gracePeriod=2 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.313022 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.313037 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.313152 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.313352 4664 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.319023 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr-k8s-webhook-server" containerStatusID={"Type":"cri-o","ID":"a58694108f3bbd4cd37b70b695dbe4baec1759fed367bd158e11dab4ee85eef1"} pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" containerMessage="Container frr-k8s-webhook-server failed liveness probe, will be restarted" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.319088 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" containerID="cri-o://a58694108f3bbd4cd37b70b695dbe4baec1759fed367bd158e11dab4ee85eef1" gracePeriod=10 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.354142 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.354247 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-srnhl" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.354305 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:50:48 crc kubenswrapper[4664]: E1013 07:50:48.434684 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 13 07:50:48 crc kubenswrapper[4664]: E1013 07:50:48.436038 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 13 07:50:48 crc kubenswrapper[4664]: E1013 07:50:48.437820 4664 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 13 07:50:48 crc kubenswrapper[4664]: E1013 07:50:48.437855 4664 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerName="galera" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.756970 4664 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" podUID="6647aa13-3608-4eeb-87b7-26741b9c2a6f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.757354 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.915166 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.915414 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.915521 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.915570 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.915647 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.918746 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller" containerStatusID={"Type":"cri-o","ID":"ad5d420848f09aeb8e3fd92496a4b4b0209c1d45dce76661242be0edd8a2ad69"} pod="metallb-system/frr-k8s-7pjp7" containerMessage="Container controller failed liveness probe, will be restarted" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.918884 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-7pjp7" podUID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerName="controller" containerID="cri-o://ad5d420848f09aeb8e3fd92496a4b4b0209c1d45dce76661242be0edd8a2ad69" gracePeriod=2 Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.919496 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": dial tcp [::1]:29150: connect: connection refused" Oct 13 07:50:48 crc kubenswrapper[4664]: I1013 07:50:48.919535 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-sngcf" podUID="54a84985-f353-4f50-aec1-d7b5501c1d2c" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": dial tcp [::1]:29150: connect: connection refused" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.013973 4664 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" podUID="a096a4c5-5890-4100-8462-ec39d621ff38" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.160082 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" event={"ID":"6beaedb4-8ff1-4956-a04c-b4007e2d1c50","Type":"ContainerStarted","Data":"1b36e4d90217a964540413da568e2e036bc198beda4a72875856094c05176e28"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.160896 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.161340 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.161474 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.163881 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5b7pw" event={"ID":"41825a43-78e2-42f0-aec8-2778276d69d8","Type":"ContainerStarted","Data":"b6112d9c539d759c649602f105a2f85f6b162abef3434b010f5bfa40fbf932bb"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.166149 4664 generic.go:334] "Generic (PLEG): container finished" podID="4e2fa3b0-29e6-4bdd-aed7-82cc6690b549" containerID="9e2011f46eb121206644b4823cd39cf24fb0aee818323f1623797e3a07f99827" exitCode=0 Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.166235 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" event={"ID":"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549","Type":"ContainerDied","Data":"9e2011f46eb121206644b4823cd39cf24fb0aee818323f1623797e3a07f99827"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.168419 4664 generic.go:334] "Generic (PLEG): container finished" podID="425e3312-93c3-42cc-a840-a6cbb635e244" containerID="8b9d7c6ede3c9f0b22bba018ae134acfb951e8df33df13a0eb3c845ac4cc0a2f" exitCode=0 Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.168496 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" event={"ID":"425e3312-93c3-42cc-a840-a6cbb635e244","Type":"ContainerDied","Data":"8b9d7c6ede3c9f0b22bba018ae134acfb951e8df33df13a0eb3c845ac4cc0a2f"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.170720 4664 generic.go:334] "Generic (PLEG): container finished" podID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerID="bb6e870c2f92cba2078cb02652c31251db1d82452046d4d3512a8383635cfa18" exitCode=0 Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.171081 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" event={"ID":"1d87b0ff-e00c-4918-b7f9-a08a425a5012","Type":"ContainerDied","Data":"bb6e870c2f92cba2078cb02652c31251db1d82452046d4d3512a8383635cfa18"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.171115 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" event={"ID":"1d87b0ff-e00c-4918-b7f9-a08a425a5012","Type":"ContainerStarted","Data":"8a3f6d731a14e18ece5a3f777c8eb009ed77ae809027cefdad552b861f6c1e07"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.171975 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.172372 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.172408 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.172943 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-gcdzv_65efd88f-be68-494a-a3b8-3a1b2df263d9/openshift-config-operator/1.log" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.176392 4664 generic.go:334] "Generic (PLEG): container finished" podID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerID="f2dd0ed45f74641ce27464256a14b28dd8c445359a85dab6676f5af42ba26336" exitCode=2 Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.176508 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerDied","Data":"f2dd0ed45f74641ce27464256a14b28dd8c445359a85dab6676f5af42ba26336"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.177120 4664 scope.go:117] "RemoveContainer" containerID="36bed393495904507ddabd8cc59b5eaedcf2f31bdcc2ff39a3d28dca2eb91ba9" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.179343 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-58897d9998-6bz9t_39908d1a-79af-485a-8deb-43f03552b3d1/console-operator/0.log" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.179402 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" event={"ID":"39908d1a-79af-485a-8deb-43f03552b3d1","Type":"ContainerStarted","Data":"914aad64a6c82627bc6db49aabfb2ea8ce15123592ecc43f551ca33948f2c1af"} Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.179612 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.179894 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: 
Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.179931 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.310709 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7pjp7"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.318498 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": dial tcp 10.217.0.49:7572: connect: connection refused"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.479993 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rhmxl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.480711 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podUID="acb034b5-2645-458a-91ae-14c42b6632b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.480504 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" podUID="24e25e0a-a138-41fb-b90e-08d800f751b4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.480480 4664 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rhmxl container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.481033 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-rhmxl" podUID="acb034b5-2645-458a-91ae-14c42b6632b2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.719601 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-w8mlm"
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.805680 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused" start-of-body=
Oct 13 07:50:49 crc kubenswrapper[4664]: I1013 07:50:49.805990 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": dial tcp 10.217.0.52:8443: connect: connection refused"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.059031 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" containerID="cri-o://19489a9e2a956a6eac4a3a695039618d5f0e1b7dc771e7427d7391a00e2a6983" gracePeriod=22
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.206788 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" event={"ID":"425e3312-93c3-42cc-a840-a6cbb635e244","Type":"ContainerStarted","Data":"5b0b111cfd3481f5e660376965e667e16614a3e54db6647b732a80d336ffc582"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.207441 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.207712 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body=
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.207772 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.214049 4664 generic.go:334] "Generic (PLEG): container finished" podID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerID="a58694108f3bbd4cd37b70b695dbe4baec1759fed367bd158e11dab4ee85eef1" exitCode=0
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.214126 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" event={"ID":"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309","Type":"ContainerDied","Data":"a58694108f3bbd4cd37b70b695dbe4baec1759fed367bd158e11dab4ee85eef1"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.226836 4664 generic.go:334] "Generic (PLEG): container finished" podID="0daca0ae-d791-4eca-bdc5-8e5f598b8d85" containerID="855894c19e5600eaa4f4f973cdecb4cb7d71e9f941dd3c4ea61c15110e6757f5" exitCode=0
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.226919 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" event={"ID":"0daca0ae-d791-4eca-bdc5-8e5f598b8d85","Type":"ContainerDied","Data":"855894c19e5600eaa4f4f973cdecb4cb7d71e9f941dd3c4ea61c15110e6757f5"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.271497 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-gcdzv_65efd88f-be68-494a-a3b8-3a1b2df263d9/openshift-config-operator/1.log"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.278560 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" event={"ID":"65efd88f-be68-494a-a3b8-3a1b2df263d9","Type":"ContainerStarted","Data":"b0e408435b8608c72f2042c2e379eefc9819205fc4094fcbb542439c528cd112"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.279197 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.295148 4664 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4" exitCode=0
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.295223 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"31020613aa728592f7047b9d6d2d94b737760ad515bb3bfd500b981b3c1539c4"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.369717 4664 generic.go:334] "Generic (PLEG): container finished" podID="9037bcaf-a327-4e60-acf8-978687eb88e9" containerID="ad5d420848f09aeb8e3fd92496a4b4b0209c1d45dce76661242be0edd8a2ad69" exitCode=0
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.370056 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerDied","Data":"ad5d420848f09aeb8e3fd92496a4b4b0209c1d45dce76661242be0edd8a2ad69"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.370089 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7pjp7" event={"ID":"9037bcaf-a327-4e60-acf8-978687eb88e9","Type":"ContainerStarted","Data":"d9e4afcfe66d92cd27b23f054c08c58ffdaf8dbe37ac63bd6d83cca332559fec"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.370731 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7pjp7"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.391586 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gkm66" event={"ID":"4e2fa3b0-29e6-4bdd-aed7-82cc6690b549","Type":"ContainerStarted","Data":"11e3ae0d45f2df7051ecd85e1f6ba76552dbfb8681be4703f14af3fbccc073e4"}
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.402454 4664 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8jkvs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.402501 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" podUID="1d87b0ff-e00c-4918-b7f9-a08a425a5012" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.402779 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.402866 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.403131 4664 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pgcw9 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.403153 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" podUID="6beaedb4-8ff1-4956-a04c-b4007e2d1c50" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.523943 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5b7pw"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.536491 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5b7pw"
Oct 13 07:50:50 crc kubenswrapper[4664]: I1013 07:50:50.741641 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=<
Oct 13 07:50:50 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 07:50:50 crc kubenswrapper[4664]: >
Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.403708 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" event={"ID":"0daca0ae-d791-4eca-bdc5-8e5f598b8d85","Type":"ContainerStarted","Data":"7e73a08a7cbc0b393fa31f9321b95d64f81cc66fb9fc8adee16a7888c2cde140"}
Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.404341 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh"
Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.409343 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" event={"ID":"cc585f12-7a56-4e1e-a5a1-4a9ccedd4309","Type":"ContainerStarted","Data":"600042e03a86719c846cdabb857865c4954cfa7c836418e293817c4de3a30b52"}
Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.410299 4664 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jz47t container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body=
probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" podUID="cc585f12-7a56-4e1e-a5a1-4a9ccedd4309" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.49:7572/metrics\": dial tcp 10.217.0.49:7572: connect: connection refused" Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.410340 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" podUID="425e3312-93c3-42cc-a840-a6cbb635e244" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.677211 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:50:51 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:50:51 crc kubenswrapper[4664]: > Oct 13 07:50:51 crc kubenswrapper[4664]: I1013 07:50:51.784023 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerName="galera" probeResult="failure" output="command timed out" Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.432055 4664 generic.go:334] "Generic (PLEG): container finished" podID="cc0510b6-15a8-4d1a-93c3-f92869340539" containerID="507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42" exitCode=0 Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.432119 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerDied","Data":"507a144010966912bab9f5189e342fbab0cc83403615c643729f5b67af709b42"} Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.452590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7d7fd3dbca4b0e061052d3fe2b6075dbeb01a82bf5be9755f1bc0c299f874068"} Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.453241 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.453286 4664 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" start-of-body= Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.453326 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.453735 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection 
refused" start-of-body= Oct 13 07:50:52 crc kubenswrapper[4664]: I1013 07:50:52.453757 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.466931 4664 generic.go:334] "Generic (PLEG): container finished" podID="a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931" containerID="19489a9e2a956a6eac4a3a695039618d5f0e1b7dc771e7427d7391a00e2a6983" exitCode=0 Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.467552 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerDied","Data":"19489a9e2a956a6eac4a3a695039618d5f0e1b7dc771e7427d7391a00e2a6983"} Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.470728 4664 generic.go:334] "Generic (PLEG): container finished" podID="fd97ddf9-5f06-4c91-af04-42c116fac89d" containerID="4a5f1ab1d2b02066b676a5f2f840925d5c727dd05eb8983b65bb0867c965af73" exitCode=0 Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.470827 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fd97ddf9-5f06-4c91-af04-42c116fac89d","Type":"ContainerDied","Data":"4a5f1ab1d2b02066b676a5f2f840925d5c727dd05eb8983b65bb0867c965af73"} Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.471098 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.639675 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.639725 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.640636 4664 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-gcdzv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 13 07:50:53 crc kubenswrapper[4664]: I1013 07:50:53.640699 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" podUID="65efd88f-be68-494a-a3b8-3a1b2df263d9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.524944 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931","Type":"ContainerStarted","Data":"5b6b88fc7286ccc4056d33e230227e3761a0286afa4dff2081bca1f0586e817e"} Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.563433 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc0510b6-15a8-4d1a-93c3-f92869340539","Type":"ContainerStarted","Data":"f684937f1731dc6223094f80348b39f9e20bd5ca383f05290cf7ca1dd76685df"} Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.588932 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.588976 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.589186 4664 patch_prober.go:28] interesting pod/console-operator-58897d9998-6bz9t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Oct 13 07:50:54 crc kubenswrapper[4664]: I1013 07:50:54.589231 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" podUID="39908d1a-79af-485a-8deb-43f03552b3d1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.079853 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.087464 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-notification-agent" containerID="cri-o://99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b" gracePeriod=30 Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.087522 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" containerID="cri-o://aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e" gracePeriod=30 Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.087469 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="sg-core" containerID="cri-o://f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd" gracePeriod=30 Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.087472 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="proxy-httpd" containerID="cri-o://bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729" gracePeriod=30 Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 
07:50:55.632560 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8jkvs" Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.648646 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pgcw9" Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.739983 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jz47t" Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.948696 4664 patch_prober.go:28] interesting pod/router-default-5444994796-4bxj9 container/router namespace/openshift-ingress: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]backend-http ok Oct 13 07:50:55 crc kubenswrapper[4664]: [+]has-synced ok Oct 13 07:50:55 crc kubenswrapper[4664]: [-]process-running failed: reason withheld Oct 13 07:50:55 crc kubenswrapper[4664]: healthz check failed Oct 13 07:50:55 crc kubenswrapper[4664]: I1013 07:50:55.949053 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4bxj9" podUID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.058132 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:50:56 crc kubenswrapper[4664]: E1013 07:50:56.060395 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.525510 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" containerID="cri-o://f91e5cd8608543258f8cfb67e529651e03a24e1a5451ea212f0171b0310c68d8" gracePeriod=15 Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.602400 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd"} Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.606129 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerID="f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd" exitCode=2 Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.667719 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gcdzv" Oct 13 07:50:56 crc kubenswrapper[4664]: I1013 07:50:56.987362 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-22k5w" Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.390109 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7b75c7q" Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.549395 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-794668dd4-b58l9" Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.723389 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerID="bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729" exitCode=0 Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.723488 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729"} Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.757830 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-4bxj9_207d72d8-7daf-4223-9b7a-25c4edfdb490/router/0.log" Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.758353 4664 generic.go:334] "Generic (PLEG): container finished" podID="207d72d8-7daf-4223-9b7a-25c4edfdb490" containerID="6d583ad9075204a43d3edc2da689d253133491dc76b278ac8b780271c9ccb87e" exitCode=137 Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.758477 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4bxj9" event={"ID":"207d72d8-7daf-4223-9b7a-25c4edfdb490","Type":"ContainerDied","Data":"6d583ad9075204a43d3edc2da689d253133491dc76b278ac8b780271c9ccb87e"} Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.768304 4664 generic.go:334] "Generic (PLEG): container finished" podID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerID="f91e5cd8608543258f8cfb67e529651e03a24e1a5451ea212f0171b0310c68d8" exitCode=0 Oct 13 07:50:57 crc kubenswrapper[4664]: I1013 07:50:57.768348 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" event={"ID":"8d816f6a-321a-498a-a01d-0e156c69b4a1","Type":"ContainerDied","Data":"f91e5cd8608543258f8cfb67e529651e03a24e1a5451ea212f0171b0310c68d8"} Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.428380 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.429472 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.778034 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" event={"ID":"8d816f6a-321a-498a-a01d-0e156c69b4a1","Type":"ContainerStarted","Data":"83999a9a061fb2d5de0dca3364da988c10c3fcd7ed2bf5992c34999be61e2627"} Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.778421 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.778465 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": dial tcp 10.217.0.55:6443: connect: connection refused" start-of-body= Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 
07:50:58.778514 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": dial tcp 10.217.0.55:6443: connect: connection refused" Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.780978 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-4bxj9_207d72d8-7daf-4223-9b7a-25c4edfdb490/router/0.log" Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.781928 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4bxj9" event={"ID":"207d72d8-7daf-4223-9b7a-25c4edfdb490","Type":"ContainerStarted","Data":"9fe6134af3fa5f7aceac47ca951a21acbb137899e651691228f38cceea880917"} Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.796335 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fd97ddf9-5f06-4c91-af04-42c116fac89d","Type":"ContainerStarted","Data":"4c43ff94fc7ae1e7af6c8eb6e1dcfdb838b9a232fac4efaf546c9457898e9838"} Oct 13 07:50:58 crc kubenswrapper[4664]: I1013 07:50:58.928900 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-sngcf" Oct 13 07:50:59 crc kubenswrapper[4664]: I1013 07:50:59.495144 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:50:59 crc kubenswrapper[4664]: I1013 07:50:59.499920 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:50:59 crc kubenswrapper[4664]: I1013 07:50:59.805173 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:50:59 crc kubenswrapper[4664]: I1013 07:50:59.822312 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" Oct 13 07:50:59 crc kubenswrapper[4664]: I1013 07:50:59.823435 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4bxj9" Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.020012 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" probeResult="failure" output=< Oct 13 07:51:00 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:51:00 crc kubenswrapper[4664]: > Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.032466 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.032518 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.805019 4664 patch_prober.go:28] interesting pod/oauth-openshift-6fff5dcfd9-rpdgm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.55:6443/healthz\": context deadline exceeded" start-of-body= Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.805285 4664 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" podUID="8d816f6a-321a-498a-a01d-0e156c69b4a1" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.55:6443/healthz\": context deadline exceeded" Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.819622 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerID="99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b" exitCode=0 Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.819854 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b"} Oct 13 07:51:00 crc kubenswrapper[4664]: I1013 07:51:00.919690 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6fff5dcfd9-rpdgm" Oct 13 07:51:01 crc kubenswrapper[4664]: I1013 07:51:01.620769 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-5b7pw" podUID="41825a43-78e2-42f0-aec8-2778276d69d8" containerName="registry-server" probeResult="failure" output=< Oct 13 07:51:01 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 07:51:01 crc kubenswrapper[4664]: > Oct 13 07:51:02 crc kubenswrapper[4664]: I1013 07:51:02.390697 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 13 07:51:02 crc kubenswrapper[4664]: I1013 07:51:02.516190 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 13 07:51:02 crc kubenswrapper[4664]: I1013 07:51:02.671319 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 13 07:51:02 crc kubenswrapper[4664]: I1013 07:51:02.750944 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 13 07:51:03 crc kubenswrapper[4664]: I1013 07:51:03.342002 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 13 07:51:03 crc kubenswrapper[4664]: I1013 07:51:03.398712 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 13 07:51:04 crc kubenswrapper[4664]: I1013 07:51:04.599556 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-6bz9t" Oct 13 07:51:06 crc kubenswrapper[4664]: I1013 07:51:06.463741 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-594d989fdd-xrzvh" Oct 13 07:51:07 crc kubenswrapper[4664]: I1013 07:51:07.243988 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-nf4cs" Oct 13 07:51:07 crc kubenswrapper[4664]: I1013 07:51:07.792938 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7pjp7" Oct 13 07:51:08 crc kubenswrapper[4664]: I1013 07:51:08.976850 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:51:09 crc kubenswrapper[4664]: I1013 07:51:09.040387 4664 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:51:10 crc kubenswrapper[4664]: I1013 07:51:10.595397 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 07:51:10 crc kubenswrapper[4664]: I1013 07:51:10.649925 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5b7pw" Oct 13 07:51:11 crc kubenswrapper[4664]: I1013 07:51:11.051117 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:51:11 crc kubenswrapper[4664]: E1013 07:51:11.051356 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:51:14 crc kubenswrapper[4664]: I1013 07:51:14.250187 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:51:14 crc kubenswrapper[4664]: I1013 07:51:14.250832 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dvt4k" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" containerID="cri-o://78d9f1e1f569a5fff0e7ed94d9b1af55ab3a054b299cc5beea5c968045323a25" gracePeriod=2 Oct 13 07:51:14 crc kubenswrapper[4664]: I1013 07:51:14.942218 4664 generic.go:334] "Generic (PLEG): container finished" podID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerID="78d9f1e1f569a5fff0e7ed94d9b1af55ab3a054b299cc5beea5c968045323a25" exitCode=0 Oct 13 07:51:14 crc kubenswrapper[4664]: I1013 07:51:14.942263 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerDied","Data":"78d9f1e1f569a5fff0e7ed94d9b1af55ab3a054b299cc5beea5c968045323a25"} Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.700010 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.825513 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qst7k\" (UniqueName: \"kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k\") pod \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.825581 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities\") pod \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.825754 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content\") pod \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\" (UID: \"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2\") " Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.829579 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities" (OuterVolumeSpecName: "utilities") pod "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" (UID: "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.925941 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k" (OuterVolumeSpecName: "kube-api-access-qst7k") pod "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" (UID: "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2"). InnerVolumeSpecName "kube-api-access-qst7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.930749 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qst7k\" (UniqueName: \"kubernetes.io/projected/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-kube-api-access-qst7k\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.930814 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.954303 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" (UID: "3bb26cc4-bdaf-4909-a95f-d88843cf8ff2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.982461 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dvt4k" event={"ID":"3bb26cc4-bdaf-4909-a95f-d88843cf8ff2","Type":"ContainerDied","Data":"b90487450fca13daebc65d525c805c253c734b933c2e23f3164bf22b9a01abd0"} Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.982516 4664 scope.go:117] "RemoveContainer" containerID="78d9f1e1f569a5fff0e7ed94d9b1af55ab3a054b299cc5beea5c968045323a25" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.993040 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dvt4k" Oct 13 07:51:16 crc kubenswrapper[4664]: I1013 07:51:16.999036 4664 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.216:3000/\": dial tcp 10.217.0.216:3000: connect: connection refused" Oct 13 07:51:17 crc kubenswrapper[4664]: I1013 07:51:17.032057 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:17 crc kubenswrapper[4664]: I1013 07:51:17.070802 4664 scope.go:117] "RemoveContainer" containerID="752fd808130327cbd83c230cec49efdc23f071a011fd6644c7cfba334957aa46" Oct 13 07:51:17 crc kubenswrapper[4664]: I1013 07:51:17.108250 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:51:17 crc kubenswrapper[4664]: I1013 07:51:17.116134 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dvt4k"] Oct 13 07:51:17 crc kubenswrapper[4664]: I1013 07:51:17.120599 4664 scope.go:117] "RemoveContainer" containerID="b30feba35d4ea9cf6ef086a997c511d7bbcffd327939bd1d6b1d8e18466c1296" Oct 13 07:51:19 crc kubenswrapper[4664]: I1013 07:51:19.068547 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" path="/var/lib/kubelet/pods/3bb26cc4-bdaf-4909-a95f-d88843cf8ff2/volumes" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.837168 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892004 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llwzr\" (UniqueName: \"kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892330 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892459 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892566 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892782 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.892949 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.893199 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.893614 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle\") pod \"bcc8e7a7-4341-433f-aca2-8beec1daf684\" (UID: \"bcc8e7a7-4341-433f-aca2-8beec1daf684\") " Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.917760 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr" (OuterVolumeSpecName: "kube-api-access-llwzr") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "kube-api-access-llwzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.920599 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.921226 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.922000 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts" (OuterVolumeSpecName: "scripts") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.957683 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.997893 4664 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-scripts\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.997931 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llwzr\" (UniqueName: \"kubernetes.io/projected/bcc8e7a7-4341-433f-aca2-8beec1daf684-kube-api-access-llwzr\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.997944 4664 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.997955 4664 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bcc8e7a7-4341-433f-aca2-8beec1daf684-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:25 crc kubenswrapper[4664]: I1013 07:51:25.997968 4664 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.006532 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.032673 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.055115 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.055528 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.073745 4664 generic.go:334] "Generic (PLEG): container finished" podID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerID="aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e" exitCode=137 Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.073909 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.073931 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e"} Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.074931 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bcc8e7a7-4341-433f-aca2-8beec1daf684","Type":"ContainerDied","Data":"61b009cfb7cfa102aba26aae41c8eaa90937195b3ee76f9261b671d359980850"} Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.074953 4664 scope.go:117] "RemoveContainer" containerID="aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.086564 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data" (OuterVolumeSpecName: "config-data") pod "bcc8e7a7-4341-433f-aca2-8beec1daf684" (UID: "bcc8e7a7-4341-433f-aca2-8beec1daf684"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.100774 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.100831 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.100873 4664 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bcc8e7a7-4341-433f-aca2-8beec1daf684-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.124870 4664 scope.go:117] "RemoveContainer" containerID="bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.180593 4664 scope.go:117] "RemoveContainer" containerID="f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.270840 4664 scope.go:117] "RemoveContainer" containerID="99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.334442 4664 scope.go:117] "RemoveContainer" containerID="7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.413623 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.436507 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.445955 4664 scope.go:117] "RemoveContainer" containerID="aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.446580 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e\": container with ID starting with aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e not found: ID does not exist" containerID="aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.446629 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e"} err="failed to get container status \"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e\": rpc error: code = NotFound desc = could not find container \"aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e\": container with ID starting with aa52269ba52b3da5194a313f76c4489ac3f84cd8ee81e959aa4d684c0cdc0d2e not found: ID does not exist" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.446697 4664 scope.go:117] "RemoveContainer" containerID="bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.447205 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729\": 
container with ID starting with bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729 not found: ID does not exist" containerID="bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.447233 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729"} err="failed to get container status \"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729\": rpc error: code = NotFound desc = could not find container \"bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729\": container with ID starting with bc53bf76941feb6a11b936a3c72af738909b3170610e543a55794d7abbb5b729 not found: ID does not exist" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.447254 4664 scope.go:117] "RemoveContainer" containerID="f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.447580 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd\": container with ID starting with f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd not found: ID does not exist" containerID="f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.447623 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd"} err="failed to get container status \"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd\": rpc error: code = NotFound desc = could not find container \"f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd\": container with ID starting with f659341d07eb971cefaac2fec215a18f1789d547b46d1de1452d2e810585a2cd not found: ID does not exist" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.447757 4664 scope.go:117] "RemoveContainer" containerID="99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.448079 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b\": container with ID starting with 99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b not found: ID does not exist" containerID="99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.448106 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b"} err="failed to get container status \"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b\": rpc error: code = NotFound desc = could not find container \"99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b\": container with ID starting with 99c72992fba092f77754e22dd6295d827db77339a6b4610593f99f04b416b86b not found: ID does not exist" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.448128 4664 scope.go:117] "RemoveContainer" containerID="7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.448494 4664 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb\": container with ID starting with 7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb not found: ID does not exist" containerID="7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.448520 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb"} err="failed to get container status \"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb\": rpc error: code = NotFound desc = could not find container \"7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb\": container with ID starting with 7d5360636fd4683b127e610f367a483110e313d9e2de3cef3f28ba5665644fcb not found: ID does not exist" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.575454 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.577861 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="extract-content" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580238 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="extract-content" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580267 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="extract-utilities" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580276 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="extract-utilities" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580289 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-notification-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580295 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-notification-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580309 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580314 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580324 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="extract-utilities" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580330 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="extract-utilities" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580356 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580363 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" Oct 13 07:51:26 
crc kubenswrapper[4664]: E1013 07:51:26.580381 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="sg-core" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580387 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="sg-core" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580401 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580407 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580419 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="extract-content" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580425 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="extract-content" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.580436 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="proxy-httpd" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.580442 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="proxy-httpd" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581738 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-notification-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581779 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="sg-core" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581811 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f0168ad-aea2-43fe-b228-1ed1bff808dc" containerName="registry-server" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581827 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="proxy-httpd" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581836 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581862 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bb26cc4-bdaf-4909-a95f-d88843cf8ff2" containerName="registry-server" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.581872 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: E1013 07:51:26.582118 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.582136 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" containerName="ceilometer-central-agent" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.598179 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.649688 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.649685 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.649692 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713557 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713635 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-run-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713731 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-scripts\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713753 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713846 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfbsw\" (UniqueName: \"kubernetes.io/projected/851d9da2-ec75-41a8-8201-11601fe995d6-kube-api-access-nfbsw\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713948 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-log-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.713973 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-config-data\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.714002 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 
13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.757894 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815366 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-run-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815454 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-scripts\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815472 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815533 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfbsw\" (UniqueName: \"kubernetes.io/projected/851d9da2-ec75-41a8-8201-11601fe995d6-kube-api-access-nfbsw\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815578 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-log-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815593 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-config-data\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815611 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.815637 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.816471 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-log-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.816692 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/851d9da2-ec75-41a8-8201-11601fe995d6-run-httpd\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.832634 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.834404 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-config-data\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.834543 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-scripts\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.838275 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfbsw\" (UniqueName: \"kubernetes.io/projected/851d9da2-ec75-41a8-8201-11601fe995d6-kube-api-access-nfbsw\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.838673 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.840402 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/851d9da2-ec75-41a8-8201-11601fe995d6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"851d9da2-ec75-41a8-8201-11601fe995d6\") " pod="openstack/ceilometer-0" Oct 13 07:51:26 crc kubenswrapper[4664]: I1013 07:51:26.921654 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 13 07:51:27 crc kubenswrapper[4664]: I1013 07:51:27.059744 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcc8e7a7-4341-433f-aca2-8beec1daf684" path="/var/lib/kubelet/pods/bcc8e7a7-4341-433f-aca2-8beec1daf684/volumes" Oct 13 07:51:27 crc kubenswrapper[4664]: I1013 07:51:27.697060 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 13 07:51:27 crc kubenswrapper[4664]: W1013 07:51:27.708664 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod851d9da2_ec75_41a8_8201_11601fe995d6.slice/crio-fd46eec11d1f5f2444a0407e183d98b7c24bba62259a04385e38b148e7efe689 WatchSource:0}: Error finding container fd46eec11d1f5f2444a0407e183d98b7c24bba62259a04385e38b148e7efe689: Status 404 returned error can't find the container with id fd46eec11d1f5f2444a0407e183d98b7c24bba62259a04385e38b148e7efe689 Oct 13 07:51:28 crc kubenswrapper[4664]: I1013 07:51:28.096841 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"851d9da2-ec75-41a8-8201-11601fe995d6","Type":"ContainerStarted","Data":"fd46eec11d1f5f2444a0407e183d98b7c24bba62259a04385e38b148e7efe689"} Oct 13 07:51:29 crc kubenswrapper[4664]: I1013 07:51:29.115620 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"851d9da2-ec75-41a8-8201-11601fe995d6","Type":"ContainerStarted","Data":"2570930a077c043169a2d01259171de41756939bd58363061d766068a758da79"} Oct 13 07:51:29 crc kubenswrapper[4664]: I1013 07:51:29.117192 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"851d9da2-ec75-41a8-8201-11601fe995d6","Type":"ContainerStarted","Data":"9e53cf84404d59294d3f58c36d3ff9cb50f42bc577295f8248e5477b368371f2"} Oct 13 07:51:30 crc kubenswrapper[4664]: I1013 07:51:30.125887 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"851d9da2-ec75-41a8-8201-11601fe995d6","Type":"ContainerStarted","Data":"704f1ec4aa4e660c44b77f549eba429b7e54010bceae88feab632ca80c3ab53c"} Oct 13 07:51:32 crc kubenswrapper[4664]: I1013 07:51:32.145585 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"851d9da2-ec75-41a8-8201-11601fe995d6","Type":"ContainerStarted","Data":"4588c2a3963dc08722d7c5a8e641757315015056949cc92c37033ad4f800b965"} Oct 13 07:51:32 crc kubenswrapper[4664]: I1013 07:51:32.146174 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 13 07:51:32 crc kubenswrapper[4664]: I1013 07:51:32.173431 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.718921566 podStartE2EDuration="6.173409065s" podCreationTimestamp="2025-10-13 07:51:26 +0000 UTC" firstStartedPulling="2025-10-13 07:51:27.711672901 +0000 UTC m=+3895.399118093" lastFinishedPulling="2025-10-13 07:51:31.1661604 +0000 UTC m=+3898.853605592" observedRunningTime="2025-10-13 07:51:32.172450849 +0000 UTC m=+3899.859896041" watchObservedRunningTime="2025-10-13 07:51:32.173409065 +0000 UTC m=+3899.860854257" Oct 13 07:51:41 crc kubenswrapper[4664]: I1013 07:51:41.048933 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:51:41 crc kubenswrapper[4664]: E1013 07:51:41.049669 4664 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:51:45 crc kubenswrapper[4664]: I1013 07:51:45.586872 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 13 07:51:52 crc kubenswrapper[4664]: I1013 07:51:52.362918 4664 generic.go:334] "Generic (PLEG): container finished" podID="67f690b7-0671-4a15-9d4d-1c65126e8a9a" containerID="6849ef9f6c082e8ad06890c2d64096d555bc842a91c14029b17ceb2088238852" exitCode=1 Oct 13 07:51:52 crc kubenswrapper[4664]: I1013 07:51:52.363070 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"67f690b7-0671-4a15-9d4d-1c65126e8a9a","Type":"ContainerDied","Data":"6849ef9f6c082e8ad06890c2d64096d555bc842a91c14029b17ceb2088238852"} Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.047574 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:51:54 crc kubenswrapper[4664]: E1013 07:51:54.048096 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.382517 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"67f690b7-0671-4a15-9d4d-1c65126e8a9a","Type":"ContainerDied","Data":"baecad2c17351bbf6195218e671ad414c5d56f41cf71af248177cc81e0f4c7d5"} Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.382850 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baecad2c17351bbf6195218e671ad414c5d56f41cf71af248177cc81e0f4c7d5" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.411581 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491421 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491492 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491598 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491626 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491657 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491682 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491727 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491811 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.491882 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n24d\" (UniqueName: \"kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d\") pod \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\" (UID: \"67f690b7-0671-4a15-9d4d-1c65126e8a9a\") " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.507160 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data" (OuterVolumeSpecName: 
"config-data") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.509402 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.520044 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d" (OuterVolumeSpecName: "kube-api-access-4n24d") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "kube-api-access-4n24d". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.520527 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "test-operator-logs") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.532540 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.545338 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.563410 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.570079 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "test-operator-ephemeral-workdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.574253 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "67f690b7-0671-4a15-9d4d-1c65126e8a9a" (UID: "67f690b7-0671-4a15-9d4d-1c65126e8a9a"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594565 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594692 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594706 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594717 4664 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/67f690b7-0671-4a15-9d4d-1c65126e8a9a-ca-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594754 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594767 4664 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594780 4664 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/67f690b7-0671-4a15-9d4d-1c65126e8a9a-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594808 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67f690b7-0671-4a15-9d4d-1c65126e8a9a-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.594820 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n24d\" (UniqueName: \"kubernetes.io/projected/67f690b7-0671-4a15-9d4d-1c65126e8a9a-kube-api-access-4n24d\") on node \"crc\" DevicePath \"\"" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.626694 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Oct 13 07:51:54 crc kubenswrapper[4664]: E1013 07:51:54.627146 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67f690b7-0671-4a15-9d4d-1c65126e8a9a" containerName="tempest-tests-tempest-tests-runner" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.627162 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="67f690b7-0671-4a15-9d4d-1c65126e8a9a" 
containerName="tempest-tests-tempest-tests-runner" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.627364 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="67f690b7-0671-4a15-9d4d-1c65126e8a9a" containerName="tempest-tests-tempest-tests-runner" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.628065 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.637803 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s1" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.650767 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s1" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.673348 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.691402 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696518 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wcts\" (UniqueName: \"kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696558 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696581 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696595 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696706 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696756 4664 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696786 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696824 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.696888 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.697742 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.732999 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.797872 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.797935 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wcts\" (UniqueName: \"kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 
07:51:54.797971 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.797995 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.798013 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.798102 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.798132 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.798155 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.798373 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.799045 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.799350 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.801523 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.802132 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.802640 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.805177 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.816184 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wcts\" (UniqueName: \"kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:54 crc kubenswrapper[4664]: I1013 07:51:54.973241 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 07:51:55 crc kubenswrapper[4664]: I1013 07:51:55.392105 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Oct 13 07:51:55 crc kubenswrapper[4664]: I1013 07:51:55.539231 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Oct 13 07:51:55 crc kubenswrapper[4664]: W1013 07:51:55.545454 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3be173c2_e112_49d2_8b3b_b4cd0ed730fb.slice/crio-1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc WatchSource:0}: Error finding container 1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc: Status 404 returned error can't find the container with id 1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc Oct 13 07:51:56 crc kubenswrapper[4664]: I1013 07:51:56.411594 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"3be173c2-e112-49d2-8b3b-b4cd0ed730fb","Type":"ContainerStarted","Data":"1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc"} Oct 13 07:51:56 crc kubenswrapper[4664]: I1013 07:51:56.937392 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 13 07:51:58 crc kubenswrapper[4664]: I1013 07:51:58.479583 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" podStartSLOduration=4.479562825 podStartE2EDuration="4.479562825s" podCreationTimestamp="2025-10-13 07:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:51:58.472739371 +0000 UTC m=+3926.160184563" watchObservedRunningTime="2025-10-13 07:51:58.479562825 +0000 UTC m=+3926.167008007" Oct 13 07:51:59 crc kubenswrapper[4664]: I1013 07:51:59.465163 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"3be173c2-e112-49d2-8b3b-b4cd0ed730fb","Type":"ContainerStarted","Data":"54adc3e1d4ab387c6c872ed367c3a6d8f70ce603c57fd3e987b49b4e46a1de8b"} Oct 13 07:52:05 crc kubenswrapper[4664]: I1013 07:52:05.048844 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:52:05 crc kubenswrapper[4664]: E1013 07:52:05.050193 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:52:16 crc kubenswrapper[4664]: I1013 07:52:16.047669 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:52:16 crc kubenswrapper[4664]: E1013 07:52:16.049228 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" 
podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:52:27 crc kubenswrapper[4664]: I1013 07:52:27.047731 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:52:27 crc kubenswrapper[4664]: E1013 07:52:27.048467 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:52:41 crc kubenswrapper[4664]: I1013 07:52:41.047457 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:52:41 crc kubenswrapper[4664]: E1013 07:52:41.048228 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:52:53 crc kubenswrapper[4664]: I1013 07:52:53.055138 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:52:53 crc kubenswrapper[4664]: E1013 07:52:53.056278 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.304886 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.306788 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.332645 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431105 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431459 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431522 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431544 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8mkg\" (UniqueName: \"kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431583 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431598 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.431696 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.533451 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.533603 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" 
(UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.533652 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.533729 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.533765 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8mkg\" (UniqueName: \"kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.534002 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.534041 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.548080 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.548193 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.548430 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.549107 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs\") pod \"neutron-69d56d54f7-4ft86\" 
(UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.550628 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.551474 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.560927 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8mkg\" (UniqueName: \"kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg\") pod \"neutron-69d56d54f7-4ft86\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:57 crc kubenswrapper[4664]: I1013 07:52:57.628917 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:58 crc kubenswrapper[4664]: I1013 07:52:58.519262 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 07:52:58 crc kubenswrapper[4664]: W1013 07:52:58.534708 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa WatchSource:0}: Error finding container fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa: Status 404 returned error can't find the container with id fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa Oct 13 07:52:59 crc kubenswrapper[4664]: I1013 07:52:59.144740 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerStarted","Data":"d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353"} Oct 13 07:52:59 crc kubenswrapper[4664]: I1013 07:52:59.145108 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:52:59 crc kubenswrapper[4664]: I1013 07:52:59.145121 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerStarted","Data":"18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2"} Oct 13 07:52:59 crc kubenswrapper[4664]: I1013 07:52:59.145131 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerStarted","Data":"fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa"} Oct 13 07:52:59 crc kubenswrapper[4664]: I1013 07:52:59.162059 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-69d56d54f7-4ft86" podStartSLOduration=2.1620431 podStartE2EDuration="2.1620431s" podCreationTimestamp="2025-10-13 07:52:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 07:52:59.161363831 +0000 UTC m=+3986.848809053" watchObservedRunningTime="2025-10-13 07:52:59.1620431 +0000 UTC m=+3986.849488292" Oct 13 07:53:04 crc kubenswrapper[4664]: I1013 07:53:04.047446 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:53:04 crc kubenswrapper[4664]: E1013 07:53:04.048266 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:53:16 crc kubenswrapper[4664]: I1013 07:53:16.047287 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:53:16 crc kubenswrapper[4664]: E1013 07:53:16.048111 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:53:27 crc kubenswrapper[4664]: I1013 07:53:27.646243 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 07:53:27 crc kubenswrapper[4664]: I1013 07:53:27.776668 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"] Oct 13 07:53:27 crc kubenswrapper[4664]: I1013 07:53:27.777731 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-757f4d5bc7-72d99" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-httpd" containerID="cri-o://a5751a966d89b818450155acb99025fc39018ee60ac69d1269424a004aa94e0b" gracePeriod=30 Oct 13 07:53:27 crc kubenswrapper[4664]: I1013 07:53:27.777731 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-757f4d5bc7-72d99" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-api" containerID="cri-o://e37381fa848a248f07e9bd62bba40f44b9ab1cb6563151d4cb26bcefacd306e7" gracePeriod=30 Oct 13 07:53:28 crc kubenswrapper[4664]: I1013 07:53:28.047978 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:53:28 crc kubenswrapper[4664]: E1013 07:53:28.048270 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:53:28 crc kubenswrapper[4664]: I1013 07:53:28.430186 4664 generic.go:334] "Generic (PLEG): container finished" podID="c6939aa3-6706-4731-b78f-f27013c60049" containerID="a5751a966d89b818450155acb99025fc39018ee60ac69d1269424a004aa94e0b" exitCode=0 Oct 13 07:53:28 
crc kubenswrapper[4664]: I1013 07:53:28.430249 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerDied","Data":"a5751a966d89b818450155acb99025fc39018ee60ac69d1269424a004aa94e0b"} Oct 13 07:53:36 crc kubenswrapper[4664]: I1013 07:53:36.529411 4664 generic.go:334] "Generic (PLEG): container finished" podID="c6939aa3-6706-4731-b78f-f27013c60049" containerID="e37381fa848a248f07e9bd62bba40f44b9ab1cb6563151d4cb26bcefacd306e7" exitCode=0 Oct 13 07:53:36 crc kubenswrapper[4664]: I1013 07:53:36.529503 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerDied","Data":"e37381fa848a248f07e9bd62bba40f44b9ab1cb6563151d4cb26bcefacd306e7"} Oct 13 07:53:36 crc kubenswrapper[4664]: I1013 07:53:36.943636 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-757f4d5bc7-72d99" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.004711 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.004838 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.004909 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.005035 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.005078 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m4q5\" (UniqueName: \"kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.005211 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.005250 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle\") pod \"c6939aa3-6706-4731-b78f-f27013c60049\" (UID: \"c6939aa3-6706-4731-b78f-f27013c60049\") " Oct 13 07:53:37 crc 
kubenswrapper[4664]: I1013 07:53:37.024329 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.027104 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5" (OuterVolumeSpecName: "kube-api-access-2m4q5") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "kube-api-access-2m4q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.092593 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config" (OuterVolumeSpecName: "config") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.096150 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.098727 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.108911 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111334 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111375 4664 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111388 4664 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111398 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-config\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111407 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m4q5\" (UniqueName: \"kubernetes.io/projected/c6939aa3-6706-4731-b78f-f27013c60049-kube-api-access-2m4q5\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.111415 4664 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.120097 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6939aa3-6706-4731-b78f-f27013c60049" (UID: "c6939aa3-6706-4731-b78f-f27013c60049"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.213299 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6939aa3-6706-4731-b78f-f27013c60049-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.564201 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-757f4d5bc7-72d99" event={"ID":"c6939aa3-6706-4731-b78f-f27013c60049","Type":"ContainerDied","Data":"2c95824f127f3681be1b0f7e8183116a96cbee4a8a315ef4d72ed04d840e45df"} Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.564260 4664 scope.go:117] "RemoveContainer" containerID="a5751a966d89b818450155acb99025fc39018ee60ac69d1269424a004aa94e0b" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.564346 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-757f4d5bc7-72d99" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.624447 4664 scope.go:117] "RemoveContainer" containerID="e37381fa848a248f07e9bd62bba40f44b9ab1cb6563151d4cb26bcefacd306e7" Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.632279 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"] Oct 13 07:53:37 crc kubenswrapper[4664]: I1013 07:53:37.642276 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-757f4d5bc7-72d99"] Oct 13 07:53:39 crc kubenswrapper[4664]: I1013 07:53:39.064139 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6939aa3-6706-4731-b78f-f27013c60049" path="/var/lib/kubelet/pods/c6939aa3-6706-4731-b78f-f27013c60049/volumes" Oct 13 07:53:40 crc kubenswrapper[4664]: I1013 07:53:40.047241 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:53:40 crc kubenswrapper[4664]: E1013 07:53:40.047747 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:53:53 crc kubenswrapper[4664]: I1013 07:53:53.055701 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:53:53 crc kubenswrapper[4664]: E1013 07:53:53.057092 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:54:07 crc kubenswrapper[4664]: I1013 07:54:07.051770 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:54:07 crc kubenswrapper[4664]: E1013 07:54:07.054096 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:54:20 crc kubenswrapper[4664]: I1013 07:54:20.046728 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:54:20 crc kubenswrapper[4664]: E1013 07:54:20.047993 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:54:34 crc kubenswrapper[4664]: I1013 
07:54:34.047292 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:54:34 crc kubenswrapper[4664]: E1013 07:54:34.048398 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:54:49 crc kubenswrapper[4664]: I1013 07:54:49.047503 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:54:49 crc kubenswrapper[4664]: E1013 07:54:49.048356 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:55:01 crc kubenswrapper[4664]: I1013 07:55:01.047279 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:55:01 crc kubenswrapper[4664]: E1013 07:55:01.048226 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:55:14 crc kubenswrapper[4664]: I1013 07:55:14.046585 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:55:14 crc kubenswrapper[4664]: E1013 07:55:14.047346 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:55:29 crc kubenswrapper[4664]: I1013 07:55:29.047608 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:55:29 crc kubenswrapper[4664]: E1013 07:55:29.048305 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 07:55:42 crc kubenswrapper[4664]: I1013 07:55:42.047112 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:55:42 crc kubenswrapper[4664]: I1013 07:55:42.948765 
4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130"} Oct 13 07:57:58 crc kubenswrapper[4664]: I1013 07:57:58.813862 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:57:58 crc kubenswrapper[4664]: I1013 07:57:58.815094 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:58:28 crc kubenswrapper[4664]: I1013 07:58:28.812725 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:58:28 crc kubenswrapper[4664]: I1013 07:58:28.813365 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:58:58 crc kubenswrapper[4664]: I1013 07:58:58.811773 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 07:58:58 crc kubenswrapper[4664]: I1013 07:58:58.812299 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 07:58:58 crc kubenswrapper[4664]: I1013 07:58:58.812343 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 07:58:58 crc kubenswrapper[4664]: I1013 07:58:58.813559 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 07:58:58 crc kubenswrapper[4664]: I1013 07:58:58.813624 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130" gracePeriod=600 Oct 
13 07:58:59 crc kubenswrapper[4664]: E1013 07:58:59.010294 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35504ef1_729c_4404_bd49_0d82bf23ccbb.slice/crio-conmon-d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35504ef1_729c_4404_bd49_0d82bf23ccbb.slice/crio-d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130.scope\": RecentStats: unable to find data in memory cache]" Oct 13 07:58:59 crc kubenswrapper[4664]: I1013 07:58:59.028448 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130" exitCode=0 Oct 13 07:58:59 crc kubenswrapper[4664]: I1013 07:58:59.028563 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130"} Oct 13 07:58:59 crc kubenswrapper[4664]: I1013 07:58:59.028602 4664 scope.go:117] "RemoveContainer" containerID="4c3d45c1bff8201d985487cf3e1df523e1b8212d1004f9060c4fddc0eae1f2a3" Oct 13 07:59:00 crc kubenswrapper[4664]: I1013 07:59:00.045599 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e"} Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.183971 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6"] Oct 13 08:00:00 crc kubenswrapper[4664]: E1013 08:00:00.184941 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-httpd" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.184954 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-httpd" Oct 13 08:00:00 crc kubenswrapper[4664]: E1013 08:00:00.184975 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-api" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.184982 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-api" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.185169 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-httpd" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.185198 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6939aa3-6706-4731-b78f-f27013c60049" containerName="neutron-api" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.185998 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.191882 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.192159 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.193627 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6"] Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.239199 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2skp\" (UniqueName: \"kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.239264 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.239424 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.341209 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2skp\" (UniqueName: \"kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.341441 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.341566 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.342659 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume\") pod 
\"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.352676 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.355819 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2skp\" (UniqueName: \"kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp\") pod \"collect-profiles-29339040-r5mn6\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:00 crc kubenswrapper[4664]: I1013 08:00:00.562508 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:01 crc kubenswrapper[4664]: I1013 08:00:01.178670 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6"] Oct 13 08:00:01 crc kubenswrapper[4664]: I1013 08:00:01.700822 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" event={"ID":"13feaed1-f47c-4636-b03a-ce11cdb2ae90","Type":"ContainerStarted","Data":"a11ca31b840fe4681397382dd25aa869fb1d325cf4a911b9a175a8cfa5176004"} Oct 13 08:00:01 crc kubenswrapper[4664]: I1013 08:00:01.701205 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" event={"ID":"13feaed1-f47c-4636-b03a-ce11cdb2ae90","Type":"ContainerStarted","Data":"ddb21bd4131d6e2bb7481fab6857b325203232fbbf9ea399d96bdd9bbc5e512e"} Oct 13 08:00:01 crc kubenswrapper[4664]: I1013 08:00:01.731267 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" podStartSLOduration=1.731238074 podStartE2EDuration="1.731238074s" podCreationTimestamp="2025-10-13 08:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 08:00:01.721250172 +0000 UTC m=+4409.408695384" watchObservedRunningTime="2025-10-13 08:00:01.731238074 +0000 UTC m=+4409.418683346" Oct 13 08:00:02 crc kubenswrapper[4664]: I1013 08:00:02.712543 4664 generic.go:334] "Generic (PLEG): container finished" podID="13feaed1-f47c-4636-b03a-ce11cdb2ae90" containerID="a11ca31b840fe4681397382dd25aa869fb1d325cf4a911b9a175a8cfa5176004" exitCode=0 Oct 13 08:00:02 crc kubenswrapper[4664]: I1013 08:00:02.712588 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" event={"ID":"13feaed1-f47c-4636-b03a-ce11cdb2ae90","Type":"ContainerDied","Data":"a11ca31b840fe4681397382dd25aa869fb1d325cf4a911b9a175a8cfa5176004"} Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.689919 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.771939 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2skp\" (UniqueName: \"kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp\") pod \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.772004 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume\") pod \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.772053 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume\") pod \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\" (UID: \"13feaed1-f47c-4636-b03a-ce11cdb2ae90\") " Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.773452 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume" (OuterVolumeSpecName: "config-volume") pod "13feaed1-f47c-4636-b03a-ce11cdb2ae90" (UID: "13feaed1-f47c-4636-b03a-ce11cdb2ae90"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.786901 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp" (OuterVolumeSpecName: "kube-api-access-r2skp") pod "13feaed1-f47c-4636-b03a-ce11cdb2ae90" (UID: "13feaed1-f47c-4636-b03a-ce11cdb2ae90"). InnerVolumeSpecName "kube-api-access-r2skp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.791633 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "13feaed1-f47c-4636-b03a-ce11cdb2ae90" (UID: "13feaed1-f47c-4636-b03a-ce11cdb2ae90"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.796254 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" event={"ID":"13feaed1-f47c-4636-b03a-ce11cdb2ae90","Type":"ContainerDied","Data":"ddb21bd4131d6e2bb7481fab6857b325203232fbbf9ea399d96bdd9bbc5e512e"} Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.796288 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddb21bd4131d6e2bb7481fab6857b325203232fbbf9ea399d96bdd9bbc5e512e" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.796306 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.873943 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2skp\" (UniqueName: \"kubernetes.io/projected/13feaed1-f47c-4636-b03a-ce11cdb2ae90-kube-api-access-r2skp\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.874138 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13feaed1-f47c-4636-b03a-ce11cdb2ae90-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:04 crc kubenswrapper[4664]: I1013 08:00:04.874201 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13feaed1-f47c-4636-b03a-ce11cdb2ae90-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:05 crc kubenswrapper[4664]: I1013 08:00:05.777402 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx"] Oct 13 08:00:05 crc kubenswrapper[4664]: I1013 08:00:05.785541 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29338995-9wbwx"] Oct 13 08:00:07 crc kubenswrapper[4664]: I1013 08:00:07.058011 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac51715e-8507-4676-89b6-f76b6419e7d1" path="/var/lib/kubelet/pods/ac51715e-8507-4676-89b6-f76b6419e7d1/volumes" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.419675 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:00:09 crc kubenswrapper[4664]: E1013 08:00:09.420574 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13feaed1-f47c-4636-b03a-ce11cdb2ae90" containerName="collect-profiles" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.420590 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="13feaed1-f47c-4636-b03a-ce11cdb2ae90" containerName="collect-profiles" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.420876 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="13feaed1-f47c-4636-b03a-ce11cdb2ae90" containerName="collect-profiles" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.423199 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.433431 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.565679 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.565770 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.566117 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dk77\" (UniqueName: \"kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.667407 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dk77\" (UniqueName: \"kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.667541 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.667583 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.668068 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.668155 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.695026 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4dk77\" (UniqueName: \"kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77\") pod \"redhat-operators-62fst\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:09 crc kubenswrapper[4664]: I1013 08:00:09.746446 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:10 crc kubenswrapper[4664]: W1013 08:00:10.229013 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf99a4d5a_0399_4c8f_8310_89af8d59de31.slice/crio-9b432d6a4f5e76bcea09084d692cb64c631cfd45b9ecb0689f3e47c69f407b0d WatchSource:0}: Error finding container 9b432d6a4f5e76bcea09084d692cb64c631cfd45b9ecb0689f3e47c69f407b0d: Status 404 returned error can't find the container with id 9b432d6a4f5e76bcea09084d692cb64c631cfd45b9ecb0689f3e47c69f407b0d Oct 13 08:00:10 crc kubenswrapper[4664]: I1013 08:00:10.230365 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:00:10 crc kubenswrapper[4664]: I1013 08:00:10.856959 4664 generic.go:334] "Generic (PLEG): container finished" podID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerID="e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935" exitCode=0 Oct 13 08:00:10 crc kubenswrapper[4664]: I1013 08:00:10.857002 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerDied","Data":"e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935"} Oct 13 08:00:10 crc kubenswrapper[4664]: I1013 08:00:10.857293 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerStarted","Data":"9b432d6a4f5e76bcea09084d692cb64c631cfd45b9ecb0689f3e47c69f407b0d"} Oct 13 08:00:10 crc kubenswrapper[4664]: I1013 08:00:10.862700 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:00:12 crc kubenswrapper[4664]: I1013 08:00:12.874241 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerStarted","Data":"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599"} Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.595941 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.603250 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.659353 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.786790 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfnx8\" (UniqueName: \"kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.786902 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.787067 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.888465 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.888608 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.888659 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfnx8\" (UniqueName: \"kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.889005 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.889208 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.921706 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wfnx8\" (UniqueName: \"kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8\") pod \"redhat-marketplace-hltl6\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:14 crc kubenswrapper[4664]: I1013 08:00:14.936200 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:15 crc kubenswrapper[4664]: I1013 08:00:15.416671 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:15 crc kubenswrapper[4664]: I1013 08:00:15.903687 4664 generic.go:334] "Generic (PLEG): container finished" podID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerID="8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56" exitCode=0 Oct 13 08:00:15 crc kubenswrapper[4664]: I1013 08:00:15.903725 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerDied","Data":"8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56"} Oct 13 08:00:15 crc kubenswrapper[4664]: I1013 08:00:15.903963 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerStarted","Data":"fae458b2232320e2af344c5e55c273890fbcd9f75e3f3cf0f919154b9f7881fc"} Oct 13 08:00:16 crc kubenswrapper[4664]: I1013 08:00:16.914640 4664 generic.go:334] "Generic (PLEG): container finished" podID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerID="b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599" exitCode=0 Oct 13 08:00:16 crc kubenswrapper[4664]: I1013 08:00:16.914702 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerDied","Data":"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599"} Oct 13 08:00:16 crc kubenswrapper[4664]: I1013 08:00:16.918691 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerStarted","Data":"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e"} Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.592698 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q5xck"] Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.594608 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.619736 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5xck"] Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.675170 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkpgr\" (UniqueName: \"kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.675269 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.675358 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.776597 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.776727 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkpgr\" (UniqueName: \"kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.776769 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.777089 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.778311 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.798948 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tkpgr\" (UniqueName: \"kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr\") pod \"community-operators-q5xck\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.916546 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.983503 4664 generic.go:334] "Generic (PLEG): container finished" podID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerID="49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e" exitCode=0 Oct 13 08:00:17 crc kubenswrapper[4664]: I1013 08:00:17.983544 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerDied","Data":"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e"} Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.660419 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5xck"] Oct 13 08:00:18 crc kubenswrapper[4664]: W1013 08:00:18.672059 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf419078e_b8c3_4dbd_a5d6_acae0c14847e.slice/crio-2c60a3fe163770087232455320ee7a648f49047a84058580a15d270c309445c5 WatchSource:0}: Error finding container 2c60a3fe163770087232455320ee7a648f49047a84058580a15d270c309445c5: Status 404 returned error can't find the container with id 2c60a3fe163770087232455320ee7a648f49047a84058580a15d270c309445c5 Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.993397 4664 generic.go:334] "Generic (PLEG): container finished" podID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerID="76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966" exitCode=0 Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.993465 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerDied","Data":"76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966"} Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.993490 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerStarted","Data":"2c60a3fe163770087232455320ee7a648f49047a84058580a15d270c309445c5"} Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.997467 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerStarted","Data":"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad"} Oct 13 08:00:18 crc kubenswrapper[4664]: I1013 08:00:18.999461 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerStarted","Data":"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419"} Oct 13 08:00:19 crc kubenswrapper[4664]: I1013 08:00:19.043489 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hltl6" podStartSLOduration=2.455368422 
podStartE2EDuration="5.043469761s" podCreationTimestamp="2025-10-13 08:00:14 +0000 UTC" firstStartedPulling="2025-10-13 08:00:15.90551919 +0000 UTC m=+4423.592964382" lastFinishedPulling="2025-10-13 08:00:18.493620529 +0000 UTC m=+4426.181065721" observedRunningTime="2025-10-13 08:00:19.036228735 +0000 UTC m=+4426.723673917" watchObservedRunningTime="2025-10-13 08:00:19.043469761 +0000 UTC m=+4426.730914953" Oct 13 08:00:19 crc kubenswrapper[4664]: I1013 08:00:19.066204 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-62fst" podStartSLOduration=3.224758787 podStartE2EDuration="10.066183695s" podCreationTimestamp="2025-10-13 08:00:09 +0000 UTC" firstStartedPulling="2025-10-13 08:00:10.858980542 +0000 UTC m=+4418.546425734" lastFinishedPulling="2025-10-13 08:00:17.70040545 +0000 UTC m=+4425.387850642" observedRunningTime="2025-10-13 08:00:19.063967036 +0000 UTC m=+4426.751412228" watchObservedRunningTime="2025-10-13 08:00:19.066183695 +0000 UTC m=+4426.753628887" Oct 13 08:00:19 crc kubenswrapper[4664]: I1013 08:00:19.746905 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:19 crc kubenswrapper[4664]: I1013 08:00:19.747392 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:20 crc kubenswrapper[4664]: I1013 08:00:20.008035 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerStarted","Data":"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328"} Oct 13 08:00:20 crc kubenswrapper[4664]: I1013 08:00:20.789548 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-62fst" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" probeResult="failure" output=< Oct 13 08:00:20 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:00:20 crc kubenswrapper[4664]: > Oct 13 08:00:22 crc kubenswrapper[4664]: I1013 08:00:22.027348 4664 generic.go:334] "Generic (PLEG): container finished" podID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerID="b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328" exitCode=0 Oct 13 08:00:22 crc kubenswrapper[4664]: I1013 08:00:22.027507 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerDied","Data":"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328"} Oct 13 08:00:24 crc kubenswrapper[4664]: I1013 08:00:24.046196 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerStarted","Data":"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a"} Oct 13 08:00:24 crc kubenswrapper[4664]: I1013 08:00:24.076993 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q5xck" podStartSLOduration=3.213688664 podStartE2EDuration="7.076971946s" podCreationTimestamp="2025-10-13 08:00:17 +0000 UTC" firstStartedPulling="2025-10-13 08:00:18.995821481 +0000 UTC m=+4426.683266673" lastFinishedPulling="2025-10-13 08:00:22.859104763 +0000 UTC m=+4430.546549955" 
observedRunningTime="2025-10-13 08:00:24.065841904 +0000 UTC m=+4431.753287116" watchObservedRunningTime="2025-10-13 08:00:24.076971946 +0000 UTC m=+4431.764417138" Oct 13 08:00:24 crc kubenswrapper[4664]: I1013 08:00:24.938303 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:24 crc kubenswrapper[4664]: I1013 08:00:24.938367 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:24 crc kubenswrapper[4664]: I1013 08:00:24.985762 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:25 crc kubenswrapper[4664]: I1013 08:00:25.635743 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:26 crc kubenswrapper[4664]: I1013 08:00:26.179219 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.078385 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hltl6" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="registry-server" containerID="cri-o://91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419" gracePeriod=2 Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.613835 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.775393 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfnx8\" (UniqueName: \"kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8\") pod \"6956a44a-f683-4823-a1e8-e4781a441d6c\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.775680 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities\") pod \"6956a44a-f683-4823-a1e8-e4781a441d6c\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.775727 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content\") pod \"6956a44a-f683-4823-a1e8-e4781a441d6c\" (UID: \"6956a44a-f683-4823-a1e8-e4781a441d6c\") " Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.776920 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities" (OuterVolumeSpecName: "utilities") pod "6956a44a-f683-4823-a1e8-e4781a441d6c" (UID: "6956a44a-f683-4823-a1e8-e4781a441d6c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.785199 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8" (OuterVolumeSpecName: "kube-api-access-wfnx8") pod "6956a44a-f683-4823-a1e8-e4781a441d6c" (UID: "6956a44a-f683-4823-a1e8-e4781a441d6c"). 
InnerVolumeSpecName "kube-api-access-wfnx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.799236 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6956a44a-f683-4823-a1e8-e4781a441d6c" (UID: "6956a44a-f683-4823-a1e8-e4781a441d6c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.877927 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfnx8\" (UniqueName: \"kubernetes.io/projected/6956a44a-f683-4823-a1e8-e4781a441d6c-kube-api-access-wfnx8\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.877954 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.877965 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6956a44a-f683-4823-a1e8-e4781a441d6c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.917672 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:27 crc kubenswrapper[4664]: I1013 08:00:27.917725 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.088844 4664 generic.go:334] "Generic (PLEG): container finished" podID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerID="91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419" exitCode=0 Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.088987 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hltl6" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.089008 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerDied","Data":"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419"} Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.090008 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hltl6" event={"ID":"6956a44a-f683-4823-a1e8-e4781a441d6c","Type":"ContainerDied","Data":"fae458b2232320e2af344c5e55c273890fbcd9f75e3f3cf0f919154b9f7881fc"} Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.090037 4664 scope.go:117] "RemoveContainer" containerID="91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.144138 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.145414 4664 scope.go:117] "RemoveContainer" containerID="49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.163981 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hltl6"] Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.184482 4664 scope.go:117] "RemoveContainer" containerID="8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.236464 4664 scope.go:117] "RemoveContainer" containerID="91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419" Oct 13 08:00:28 crc kubenswrapper[4664]: E1013 08:00:28.251449 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419\": container with ID starting with 91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419 not found: ID does not exist" containerID="91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.251536 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419"} err="failed to get container status \"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419\": rpc error: code = NotFound desc = could not find container \"91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419\": container with ID starting with 91f293add32219dedd63e4a133910c125fb6fddd6d7e8583575c0da8e379e419 not found: ID does not exist" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.251571 4664 scope.go:117] "RemoveContainer" containerID="49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e" Oct 13 08:00:28 crc kubenswrapper[4664]: E1013 08:00:28.252142 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e\": container with ID starting with 49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e not found: ID does not exist" containerID="49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e" Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.252168 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e"} err="failed to get container status \"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e\": rpc error: code = NotFound desc = could not find container \"49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e\": container with ID starting with 49c123b29443d44f2654396b21c33ca22a826fc7998f1bebbc5ccf48a79fef5e not found: ID does not exist"
Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.252186 4664 scope.go:117] "RemoveContainer" containerID="8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56"
Oct 13 08:00:28 crc kubenswrapper[4664]: E1013 08:00:28.253901 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56\": container with ID starting with 8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56 not found: ID does not exist" containerID="8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56"
Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.253932 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56"} err="failed to get container status \"8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56\": rpc error: code = NotFound desc = could not find container \"8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56\": container with ID starting with 8a0e1715b0862864a1d750648ab8e74d84e1e6179a7e80809e390ec958e3ca56 not found: ID does not exist"
Oct 13 08:00:28 crc kubenswrapper[4664]: I1013 08:00:28.963154 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-q5xck" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server" probeResult="failure" output=<
Oct 13 08:00:28 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 08:00:28 crc kubenswrapper[4664]: >
Oct 13 08:00:29 crc kubenswrapper[4664]: I1013 08:00:29.074520 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" path="/var/lib/kubelet/pods/6956a44a-f683-4823-a1e8-e4781a441d6c/volumes"
Oct 13 08:00:30 crc kubenswrapper[4664]: I1013 08:00:30.797519 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-62fst" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" probeResult="failure" output=<
Oct 13 08:00:30 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 08:00:30 crc kubenswrapper[4664]: >
Oct 13 08:00:37 crc kubenswrapper[4664]: I1013 08:00:37.976307 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q5xck"
Oct 13 08:00:38 crc kubenswrapper[4664]: I1013 08:00:38.032461 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q5xck"
Oct 13 08:00:38 crc kubenswrapper[4664]: I1013 08:00:38.219040 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5xck"]
Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.187219 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q5xck" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server" containerID="cri-o://6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" gracePeriod=2
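
The recurring "Probe failed ... timeout: failed to connect service \":50051\" within 1s" blocks above are the startup probe of the registry-server container: the pod is running, but its gRPC endpoint on port 50051 does not accept connections until the catalog content has been extracted, so the probe keeps failing and is retried until the "startup status=started" transition. A rough stdlib-only Go analogue of such a connect-with-deadline check (an assumption about the probe's shape, not the actual probe binary the pod runs):

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// One-second budget, mirroring the "within 1s" in the probe output above.
	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s: %v\n", ":50051", err)
		os.Exit(1) // non-zero exit marks this probe attempt as a failure
	}
	conn.Close() // connection accepted: the server is up, probe succeeds
}
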
pod="openshift-marketplace/community-operators-q5xck" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server" containerID="cri-o://6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" gracePeriod=2 Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.723160 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.819831 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content\") pod \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.819951 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities\") pod \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.820061 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkpgr\" (UniqueName: \"kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr\") pod \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\" (UID: \"f419078e-b8c3-4dbd-a5d6-acae0c14847e\") " Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.821676 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities" (OuterVolumeSpecName: "utilities") pod "f419078e-b8c3-4dbd-a5d6-acae0c14847e" (UID: "f419078e-b8c3-4dbd-a5d6-acae0c14847e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.836116 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr" (OuterVolumeSpecName: "kube-api-access-tkpgr") pod "f419078e-b8c3-4dbd-a5d6-acae0c14847e" (UID: "f419078e-b8c3-4dbd-a5d6-acae0c14847e"). InnerVolumeSpecName "kube-api-access-tkpgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.899293 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f419078e-b8c3-4dbd-a5d6-acae0c14847e" (UID: "f419078e-b8c3-4dbd-a5d6-acae0c14847e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.922350 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkpgr\" (UniqueName: \"kubernetes.io/projected/f419078e-b8c3-4dbd-a5d6-acae0c14847e-kube-api-access-tkpgr\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.922412 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:39 crc kubenswrapper[4664]: I1013 08:00:39.922422 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f419078e-b8c3-4dbd-a5d6-acae0c14847e-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.203412 4664 generic.go:334] "Generic (PLEG): container finished" podID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerID="6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" exitCode=0 Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.203463 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerDied","Data":"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a"} Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.203495 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5xck" event={"ID":"f419078e-b8c3-4dbd-a5d6-acae0c14847e","Type":"ContainerDied","Data":"2c60a3fe163770087232455320ee7a648f49047a84058580a15d270c309445c5"} Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.203499 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q5xck" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.203517 4664 scope.go:117] "RemoveContainer" containerID="6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.252965 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5xck"] Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.267692 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q5xck"] Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.276206 4664 scope.go:117] "RemoveContainer" containerID="b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.304734 4664 scope.go:117] "RemoveContainer" containerID="76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.357619 4664 scope.go:117] "RemoveContainer" containerID="6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" Oct 13 08:00:40 crc kubenswrapper[4664]: E1013 08:00:40.358170 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a\": container with ID starting with 6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a not found: ID does not exist" containerID="6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.358230 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a"} err="failed to get container status \"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a\": rpc error: code = NotFound desc = could not find container \"6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a\": container with ID starting with 6ed79374ddbf0f5e1706ef479a76c0b1cfb81fabc9cd606d64666fcf89d0233a not found: ID does not exist" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.358266 4664 scope.go:117] "RemoveContainer" containerID="b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328" Oct 13 08:00:40 crc kubenswrapper[4664]: E1013 08:00:40.359209 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328\": container with ID starting with b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328 not found: ID does not exist" containerID="b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.359633 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328"} err="failed to get container status \"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328\": rpc error: code = NotFound desc = could not find container \"b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328\": container with ID starting with b073aed9976fcb2f943cd1bf78befc9b4e7a3ef2e6811748600d43976cd7f328 not found: ID does not exist" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.359700 4664 scope.go:117] "RemoveContainer" 
containerID="76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966" Oct 13 08:00:40 crc kubenswrapper[4664]: E1013 08:00:40.360519 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966\": container with ID starting with 76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966 not found: ID does not exist" containerID="76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.360555 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966"} err="failed to get container status \"76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966\": rpc error: code = NotFound desc = could not find container \"76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966\": container with ID starting with 76832a69dd0aed443cf29ef56d3ecd3df48e73c2b8fccd0584e13223ca1be966 not found: ID does not exist" Oct 13 08:00:40 crc kubenswrapper[4664]: I1013 08:00:40.805220 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-62fst" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" probeResult="failure" output=< Oct 13 08:00:40 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:00:40 crc kubenswrapper[4664]: > Oct 13 08:00:41 crc kubenswrapper[4664]: I1013 08:00:41.056193 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" path="/var/lib/kubelet/pods/f419078e-b8c3-4dbd-a5d6-acae0c14847e/volumes" Oct 13 08:00:42 crc kubenswrapper[4664]: I1013 08:00:42.068087 4664 scope.go:117] "RemoveContainer" containerID="2836ade4fdbf931d7f75665c43b584016de43038d3f90bd67a7e422e7a989634" Oct 13 08:00:50 crc kubenswrapper[4664]: I1013 08:00:50.895330 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-62fst" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" probeResult="failure" output=< Oct 13 08:00:50 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:00:50 crc kubenswrapper[4664]: > Oct 13 08:00:59 crc kubenswrapper[4664]: I1013 08:00:59.812938 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:00:59 crc kubenswrapper[4664]: I1013 08:00:59.873757 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.067267 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.169828 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29339041-6qn7p"] Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 08:01:00.170314 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="extract-content" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170335 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="extract-content" Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170364 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="extract-utilities"
Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 08:01:00.170385 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170394 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 08:01:00.170414 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170422 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 08:01:00.170437 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="extract-utilities"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170445 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="extract-utilities"
Oct 13 08:01:00 crc kubenswrapper[4664]: E1013 08:01:00.170463 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="extract-content"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170471 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="extract-content"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170688 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f419078e-b8c3-4dbd-a5d6-acae0c14847e" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.170723 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6956a44a-f683-4823-a1e8-e4781a441d6c" containerName="registry-server"
Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.171458 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339041-6qn7p"
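
The burst of cpu_manager/state_mem/memory_manager entries above is housekeeping triggered by admitting the new keystone-cron pod: before accepting it, the resource managers prune per-container CPU and memory state belonging to pods that no longer exist (here the two deleted marketplace pods). A minimal Go sketch of that prune-against-active-set idea (assumed shape with illustrative names and values, not kubelet source):

package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops assignments for any pod not in the active set,
// logging each removal the way the entries above do.
func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"f419078e-b8c3-4dbd-a5d6-acae0c14847e", "registry-server"}: "cpuset 0-3", // illustrative value
		{"6956a44a-f683-4823-a1e8-e4781a441d6c", "extract-content"}: "cpuset 0-3",
	}
	active := map[string]bool{"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9": true} // the keystone-cron pod
	removeStaleState(assignments, active)
	fmt.Println(len(assignments), "assignments left") // 0
}
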
Need to start a new one" pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.222816 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29339041-6qn7p"] Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.223977 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.224025 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.224056 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l48gc\" (UniqueName: \"kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.224080 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.325544 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l48gc\" (UniqueName: \"kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.325604 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.325767 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.325833 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.333567 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.333653 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.335312 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.346122 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l48gc\" (UniqueName: \"kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc\") pod \"keystone-cron-29339041-6qn7p\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:00 crc kubenswrapper[4664]: I1013 08:01:00.504760 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.034371 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29339041-6qn7p"] Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.425221 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-62fst" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" containerID="cri-o://b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad" gracePeriod=2 Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.425675 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339041-6qn7p" event={"ID":"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9","Type":"ContainerStarted","Data":"20f2f2d663a576937cdef67d572beb3b493cadf8ff8dfe1b003f0c7287afb1b8"} Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.425710 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339041-6qn7p" event={"ID":"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9","Type":"ContainerStarted","Data":"4d9b0c92d1d879d1a0b4ba05c002898dec1975c95d8be02c0b247b400770578e"} Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.446221 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29339041-6qn7p" podStartSLOduration=1.44620409 podStartE2EDuration="1.44620409s" podCreationTimestamp="2025-10-13 08:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 08:01:01.445888912 +0000 UTC m=+4469.133334134" watchObservedRunningTime="2025-10-13 08:01:01.44620409 +0000 UTC m=+4469.133649282" Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.900275 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.963771 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content\") pod \"f99a4d5a-0399-4c8f-8310-89af8d59de31\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.963931 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities\") pod \"f99a4d5a-0399-4c8f-8310-89af8d59de31\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.964114 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dk77\" (UniqueName: \"kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77\") pod \"f99a4d5a-0399-4c8f-8310-89af8d59de31\" (UID: \"f99a4d5a-0399-4c8f-8310-89af8d59de31\") " Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.965876 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities" (OuterVolumeSpecName: "utilities") pod "f99a4d5a-0399-4c8f-8310-89af8d59de31" (UID: "f99a4d5a-0399-4c8f-8310-89af8d59de31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:01:01 crc kubenswrapper[4664]: I1013 08:01:01.976862 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77" (OuterVolumeSpecName: "kube-api-access-4dk77") pod "f99a4d5a-0399-4c8f-8310-89af8d59de31" (UID: "f99a4d5a-0399-4c8f-8310-89af8d59de31"). InnerVolumeSpecName "kube-api-access-4dk77". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.066277 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dk77\" (UniqueName: \"kubernetes.io/projected/f99a4d5a-0399-4c8f-8310-89af8d59de31-kube-api-access-4dk77\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.066307 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.067888 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f99a4d5a-0399-4c8f-8310-89af8d59de31" (UID: "f99a4d5a-0399-4c8f-8310-89af8d59de31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.168005 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99a4d5a-0399-4c8f-8310-89af8d59de31-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.434581 4664 generic.go:334] "Generic (PLEG): container finished" podID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerID="b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad" exitCode=0 Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.434643 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-62fst" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.434648 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerDied","Data":"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad"} Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.434684 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-62fst" event={"ID":"f99a4d5a-0399-4c8f-8310-89af8d59de31","Type":"ContainerDied","Data":"9b432d6a4f5e76bcea09084d692cb64c631cfd45b9ecb0689f3e47c69f407b0d"} Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.434699 4664 scope.go:117] "RemoveContainer" containerID="b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.458343 4664 scope.go:117] "RemoveContainer" containerID="b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.482048 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.489771 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-62fst"] Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.521987 4664 scope.go:117] "RemoveContainer" containerID="e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.555733 4664 scope.go:117] "RemoveContainer" containerID="b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad" Oct 13 08:01:02 crc kubenswrapper[4664]: E1013 08:01:02.558041 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad\": container with ID starting with b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad not found: ID does not exist" containerID="b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.558075 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad"} err="failed to get container status \"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad\": rpc error: code = NotFound desc = could not find container \"b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad\": container with ID starting with b116f2d66ee8be4233f27548ed428366459995cb5f83ae8866fc9017b90017ad not found: ID does not exist" Oct 13 08:01:02 crc 
kubenswrapper[4664]: I1013 08:01:02.558097 4664 scope.go:117] "RemoveContainer" containerID="b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599" Oct 13 08:01:02 crc kubenswrapper[4664]: E1013 08:01:02.560655 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599\": container with ID starting with b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599 not found: ID does not exist" containerID="b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.560696 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599"} err="failed to get container status \"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599\": rpc error: code = NotFound desc = could not find container \"b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599\": container with ID starting with b59849420f8894bce2dd5d8f35e42d1999dd85c469a8148cbe84ab8ccc157599 not found: ID does not exist" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.560722 4664 scope.go:117] "RemoveContainer" containerID="e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935" Oct 13 08:01:02 crc kubenswrapper[4664]: E1013 08:01:02.560992 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935\": container with ID starting with e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935 not found: ID does not exist" containerID="e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935" Oct 13 08:01:02 crc kubenswrapper[4664]: I1013 08:01:02.561010 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935"} err="failed to get container status \"e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935\": rpc error: code = NotFound desc = could not find container \"e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935\": container with ID starting with e4770157bca41d4b96e3c5722733008cb05b787ca058c5cc62c18bd48f436935 not found: ID does not exist" Oct 13 08:01:03 crc kubenswrapper[4664]: I1013 08:01:03.057982 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" path="/var/lib/kubelet/pods/f99a4d5a-0399-4c8f-8310-89af8d59de31/volumes" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.467651 4664 generic.go:334] "Generic (PLEG): container finished" podID="41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" containerID="20f2f2d663a576937cdef67d572beb3b493cadf8ff8dfe1b003f0c7287afb1b8" exitCode=0 Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.468227 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339041-6qn7p" event={"ID":"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9","Type":"ContainerDied","Data":"20f2f2d663a576937cdef67d572beb3b493cadf8ff8dfe1b003f0c7287afb1b8"} Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.486523 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:05 crc kubenswrapper[4664]: E1013 08:01:05.486926 4664 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.486946 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" Oct 13 08:01:05 crc kubenswrapper[4664]: E1013 08:01:05.486957 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="extract-content" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.486963 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="extract-content" Oct 13 08:01:05 crc kubenswrapper[4664]: E1013 08:01:05.487011 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="extract-utilities" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.487020 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="extract-utilities" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.487248 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="f99a4d5a-0399-4c8f-8310-89af8d59de31" containerName="registry-server" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.488663 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.536375 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.536442 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9kvx\" (UniqueName: \"kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.536605 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.536693 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.638440 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.638590 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9kvx\" (UniqueName: 
\"kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.638944 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.640836 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.641605 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.659527 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9kvx\" (UniqueName: \"kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx\") pod \"certified-operators-lhxgm\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:05 crc kubenswrapper[4664]: I1013 08:01:05.809431 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:06 crc kubenswrapper[4664]: I1013 08:01:06.691905 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:06 crc kubenswrapper[4664]: W1013 08:01:06.707509 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65ddc856_a0f2_4b18_8903_4d2e4e029753.slice/crio-f683e63b02eca79d7508724868e58766b2d60b6638de07b32eb9a3a1c7d6258f WatchSource:0}: Error finding container f683e63b02eca79d7508724868e58766b2d60b6638de07b32eb9a3a1c7d6258f: Status 404 returned error can't find the container with id f683e63b02eca79d7508724868e58766b2d60b6638de07b32eb9a3a1c7d6258f Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.146560 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.172704 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle\") pod \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.172784 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data\") pod \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.173256 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys\") pod \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.173635 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l48gc\" (UniqueName: \"kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc\") pod \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\" (UID: \"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9\") " Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.215416 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" (UID: "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.215527 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc" (OuterVolumeSpecName: "kube-api-access-l48gc") pod "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" (UID: "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9"). InnerVolumeSpecName "kube-api-access-l48gc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.241439 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data" (OuterVolumeSpecName: "config-data") pod "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" (UID: "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.259632 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" (UID: "41f22bd0-6f2c-4c59-838a-1cfe57b94ca9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.278852 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.278894 4664 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.278907 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l48gc\" (UniqueName: \"kubernetes.io/projected/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-kube-api-access-l48gc\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.278916 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f22bd0-6f2c-4c59-838a-1cfe57b94ca9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.500089 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339041-6qn7p" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.500082 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339041-6qn7p" event={"ID":"41f22bd0-6f2c-4c59-838a-1cfe57b94ca9","Type":"ContainerDied","Data":"4d9b0c92d1d879d1a0b4ba05c002898dec1975c95d8be02c0b247b400770578e"} Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.500306 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d9b0c92d1d879d1a0b4ba05c002898dec1975c95d8be02c0b247b400770578e" Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.502118 4664 generic.go:334] "Generic (PLEG): container finished" podID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerID="1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00" exitCode=0 Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.502181 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerDied","Data":"1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00"} Oct 13 08:01:07 crc kubenswrapper[4664]: I1013 08:01:07.502225 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerStarted","Data":"f683e63b02eca79d7508724868e58766b2d60b6638de07b32eb9a3a1c7d6258f"} Oct 13 08:01:08 crc kubenswrapper[4664]: I1013 08:01:08.511508 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerStarted","Data":"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38"} Oct 13 08:01:10 crc kubenswrapper[4664]: I1013 08:01:10.556013 4664 generic.go:334] "Generic (PLEG): container finished" podID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerID="3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38" exitCode=0 Oct 13 08:01:10 crc kubenswrapper[4664]: I1013 08:01:10.556146 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" 
event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerDied","Data":"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38"} Oct 13 08:01:11 crc kubenswrapper[4664]: I1013 08:01:11.568783 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerStarted","Data":"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c"} Oct 13 08:01:11 crc kubenswrapper[4664]: I1013 08:01:11.592361 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lhxgm" podStartSLOduration=3.068315532 podStartE2EDuration="6.592344522s" podCreationTimestamp="2025-10-13 08:01:05 +0000 UTC" firstStartedPulling="2025-10-13 08:01:07.503879365 +0000 UTC m=+4475.191324557" lastFinishedPulling="2025-10-13 08:01:11.027908355 +0000 UTC m=+4478.715353547" observedRunningTime="2025-10-13 08:01:11.588661673 +0000 UTC m=+4479.276106875" watchObservedRunningTime="2025-10-13 08:01:11.592344522 +0000 UTC m=+4479.279789714" Oct 13 08:01:15 crc kubenswrapper[4664]: I1013 08:01:15.810459 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:15 crc kubenswrapper[4664]: I1013 08:01:15.810745 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:16 crc kubenswrapper[4664]: I1013 08:01:16.231541 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:16 crc kubenswrapper[4664]: I1013 08:01:16.660974 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:16 crc kubenswrapper[4664]: I1013 08:01:16.707232 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:18 crc kubenswrapper[4664]: I1013 08:01:18.634945 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lhxgm" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="registry-server" containerID="cri-o://74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c" gracePeriod=2 Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.101939 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.205066 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content\") pod \"65ddc856-a0f2-4b18-8903-4d2e4e029753\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.205142 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities\") pod \"65ddc856-a0f2-4b18-8903-4d2e4e029753\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.205198 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9kvx\" (UniqueName: \"kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx\") pod \"65ddc856-a0f2-4b18-8903-4d2e4e029753\" (UID: \"65ddc856-a0f2-4b18-8903-4d2e4e029753\") " Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.206098 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities" (OuterVolumeSpecName: "utilities") pod "65ddc856-a0f2-4b18-8903-4d2e4e029753" (UID: "65ddc856-a0f2-4b18-8903-4d2e4e029753"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.215506 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx" (OuterVolumeSpecName: "kube-api-access-t9kvx") pod "65ddc856-a0f2-4b18-8903-4d2e4e029753" (UID: "65ddc856-a0f2-4b18-8903-4d2e4e029753"). InnerVolumeSpecName "kube-api-access-t9kvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.270902 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65ddc856-a0f2-4b18-8903-4d2e4e029753" (UID: "65ddc856-a0f2-4b18-8903-4d2e4e029753"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.307439 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.307481 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ddc856-a0f2-4b18-8903-4d2e4e029753-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.307494 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9kvx\" (UniqueName: \"kubernetes.io/projected/65ddc856-a0f2-4b18-8903-4d2e4e029753-kube-api-access-t9kvx\") on node \"crc\" DevicePath \"\"" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.646715 4664 generic.go:334] "Generic (PLEG): container finished" podID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerID="74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c" exitCode=0 Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.646773 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerDied","Data":"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c"} Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.646832 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhxgm" event={"ID":"65ddc856-a0f2-4b18-8903-4d2e4e029753","Type":"ContainerDied","Data":"f683e63b02eca79d7508724868e58766b2d60b6638de07b32eb9a3a1c7d6258f"} Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.646861 4664 scope.go:117] "RemoveContainer" containerID="74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.646944 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lhxgm" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.674819 4664 scope.go:117] "RemoveContainer" containerID="3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.702119 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.716698 4664 scope.go:117] "RemoveContainer" containerID="1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.716768 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lhxgm"] Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.765949 4664 scope.go:117] "RemoveContainer" containerID="74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c" Oct 13 08:01:19 crc kubenswrapper[4664]: E1013 08:01:19.766435 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c\": container with ID starting with 74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c not found: ID does not exist" containerID="74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.766536 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c"} err="failed to get container status \"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c\": rpc error: code = NotFound desc = could not find container \"74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c\": container with ID starting with 74dfaf1332af45a8c1db1eedaa83c2c58ec95712f4844d75ccb44628b729883c not found: ID does not exist" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.766632 4664 scope.go:117] "RemoveContainer" containerID="3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38" Oct 13 08:01:19 crc kubenswrapper[4664]: E1013 08:01:19.767164 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38\": container with ID starting with 3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38 not found: ID does not exist" containerID="3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.767197 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38"} err="failed to get container status \"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38\": rpc error: code = NotFound desc = could not find container \"3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38\": container with ID starting with 3071ebee8abbca032b9182221e3794d2278b148d77a31177b7936540256e2b38 not found: ID does not exist" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.767222 4664 scope.go:117] "RemoveContainer" containerID="1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00" Oct 13 08:01:19 crc kubenswrapper[4664]: E1013 08:01:19.767503 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00\": container with ID starting with 1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00 not found: ID does not exist" containerID="1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00" Oct 13 08:01:19 crc kubenswrapper[4664]: I1013 08:01:19.767535 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00"} err="failed to get container status \"1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00\": rpc error: code = NotFound desc = could not find container \"1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00\": container with ID starting with 1302e0aed2fe76161028e5ee284fa956b290a06c3661dcf84c8de76cd803da00 not found: ID does not exist" Oct 13 08:01:21 crc kubenswrapper[4664]: I1013 08:01:21.065597 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" path="/var/lib/kubelet/pods/65ddc856-a0f2-4b18-8903-4d2e4e029753/volumes" Oct 13 08:01:28 crc kubenswrapper[4664]: I1013 08:01:28.812071 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:01:28 crc kubenswrapper[4664]: I1013 08:01:28.812490 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:01:58 crc kubenswrapper[4664]: I1013 08:01:58.811737 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:01:58 crc kubenswrapper[4664]: I1013 08:01:58.812330 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:02:28 crc kubenswrapper[4664]: I1013 08:02:28.812147 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:02:28 crc kubenswrapper[4664]: I1013 08:02:28.812717 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:02:28 crc kubenswrapper[4664]: I1013 08:02:28.812782 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:02:28 crc kubenswrapper[4664]: I1013 08:02:28.813629 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:02:28 crc kubenswrapper[4664]: I1013 08:02:28.813699 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" gracePeriod=600 Oct 13 08:02:28 crc kubenswrapper[4664]: E1013 08:02:28.954788 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:02:29 crc kubenswrapper[4664]: I1013 08:02:29.352585 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" exitCode=0 Oct 13 08:02:29 crc kubenswrapper[4664]: I1013 08:02:29.352660 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e"} Oct 13 08:02:29 crc kubenswrapper[4664]: I1013 08:02:29.352967 4664 scope.go:117] "RemoveContainer" containerID="d9f07e9ed67450ec0c469749b37a9c73df82f2808a6664d3db3d9bcff5e06130" Oct 13 08:02:29 crc kubenswrapper[4664]: I1013 08:02:29.353755 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:02:29 crc kubenswrapper[4664]: E1013 08:02:29.354194 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:02:40 crc kubenswrapper[4664]: I1013 08:02:40.047718 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:02:40 crc kubenswrapper[4664]: E1013 08:02:40.048935 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:02:51 crc 
kubenswrapper[4664]: I1013 08:02:51.046621 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:02:51 crc kubenswrapper[4664]: E1013 08:02:51.047434 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:03:05 crc kubenswrapper[4664]: I1013 08:03:05.047275 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:03:05 crc kubenswrapper[4664]: E1013 08:03:05.048188 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:03:16 crc kubenswrapper[4664]: I1013 08:03:16.046855 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:03:16 crc kubenswrapper[4664]: E1013 08:03:16.047709 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:03:27 crc kubenswrapper[4664]: I1013 08:03:27.047594 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:03:27 crc kubenswrapper[4664]: E1013 08:03:27.048757 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:03:39 crc kubenswrapper[4664]: I1013 08:03:39.047290 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:03:39 crc kubenswrapper[4664]: E1013 08:03:39.050203 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:03:45 crc kubenswrapper[4664]: E1013 08:03:45.208515 4664 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.223:56084->38.102.83.223:37357: read tcp 
38.102.83.223:56084->38.102.83.223:37357: read: connection reset by peer Oct 13 08:03:50 crc kubenswrapper[4664]: I1013 08:03:50.046378 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:03:50 crc kubenswrapper[4664]: E1013 08:03:50.047149 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:04:01 crc kubenswrapper[4664]: I1013 08:04:01.047101 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:04:01 crc kubenswrapper[4664]: E1013 08:04:01.049265 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:04:15 crc kubenswrapper[4664]: I1013 08:04:15.047377 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:04:15 crc kubenswrapper[4664]: E1013 08:04:15.048177 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:04:27 crc kubenswrapper[4664]: I1013 08:04:27.048188 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:04:27 crc kubenswrapper[4664]: E1013 08:04:27.049033 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:04:39 crc kubenswrapper[4664]: I1013 08:04:39.049664 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:04:39 crc kubenswrapper[4664]: E1013 08:04:39.050451 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:04:53 crc kubenswrapper[4664]: I1013 08:04:53.055830 4664 scope.go:117] "RemoveContainer" 
containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:04:53 crc kubenswrapper[4664]: E1013 08:04:53.056819 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:05:05 crc kubenswrapper[4664]: I1013 08:05:05.047978 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:05:05 crc kubenswrapper[4664]: E1013 08:05:05.049199 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:05:17 crc kubenswrapper[4664]: I1013 08:05:17.047689 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:05:17 crc kubenswrapper[4664]: E1013 08:05:17.048251 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:05:32 crc kubenswrapper[4664]: I1013 08:05:32.047139 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:05:32 crc kubenswrapper[4664]: E1013 08:05:32.047864 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:05:44 crc kubenswrapper[4664]: I1013 08:05:44.047259 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:05:44 crc kubenswrapper[4664]: E1013 08:05:44.047946 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:05:59 crc kubenswrapper[4664]: I1013 08:05:59.047001 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:05:59 crc kubenswrapper[4664]: E1013 08:05:59.047694 4664 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:06:10 crc kubenswrapper[4664]: I1013 08:06:10.047022 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:06:10 crc kubenswrapper[4664]: E1013 08:06:10.047666 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:06:21 crc kubenswrapper[4664]: I1013 08:06:21.047555 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:06:21 crc kubenswrapper[4664]: E1013 08:06:21.048404 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:06:34 crc kubenswrapper[4664]: I1013 08:06:34.047248 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:06:34 crc kubenswrapper[4664]: E1013 08:06:34.048110 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:06:47 crc kubenswrapper[4664]: I1013 08:06:47.047103 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:06:47 crc kubenswrapper[4664]: E1013 08:06:47.048187 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:07:00 crc kubenswrapper[4664]: I1013 08:07:00.047913 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:07:00 crc kubenswrapper[4664]: E1013 08:07:00.049126 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:07:14 crc kubenswrapper[4664]: I1013 08:07:14.047090 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:07:14 crc kubenswrapper[4664]: E1013 08:07:14.047894 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:07:26 crc kubenswrapper[4664]: I1013 08:07:26.047431 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:07:26 crc kubenswrapper[4664]: E1013 08:07:26.048298 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:07:37 crc kubenswrapper[4664]: I1013 08:07:37.047021 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:07:37 crc kubenswrapper[4664]: I1013 08:07:37.498888 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e"} Oct 13 08:09:58 crc kubenswrapper[4664]: I1013 08:09:58.812581 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:09:58 crc kubenswrapper[4664]: I1013 08:09:58.813259 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.024056 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 08:10:24 crc kubenswrapper[4664]: E1013 08:10:24.025156 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="extract-content" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025177 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="extract-content" Oct 13 08:10:24 crc kubenswrapper[4664]: E1013 08:10:24.025197 4664 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="registry-server" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025205 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="registry-server" Oct 13 08:10:24 crc kubenswrapper[4664]: E1013 08:10:24.025232 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" containerName="keystone-cron" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025240 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" containerName="keystone-cron" Oct 13 08:10:24 crc kubenswrapper[4664]: E1013 08:10:24.025294 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="extract-utilities" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025304 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="extract-utilities" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025532 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="65ddc856-a0f2-4b18-8903-4d2e4e029753" containerName="registry-server" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.025544 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="41f22bd0-6f2c-4c59-838a-1cfe57b94ca9" containerName="keystone-cron" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.027294 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.036919 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.220658 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.220781 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjllk\" (UniqueName: \"kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.220859 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.323054 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjllk\" (UniqueName: \"kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.323422 4664 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.323492 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.323846 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.324041 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.350585 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjllk\" (UniqueName: \"kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk\") pod \"redhat-marketplace-pj9w2\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:24 crc kubenswrapper[4664]: I1013 08:10:24.356317 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:25 crc kubenswrapper[4664]: I1013 08:10:25.068576 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 08:10:25 crc kubenswrapper[4664]: I1013 08:10:25.156707 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerStarted","Data":"4d25e7fc08234afa0aa7360f879d5265b5068d965a989d95870a007837671ab0"} Oct 13 08:10:26 crc kubenswrapper[4664]: I1013 08:10:26.166698 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerStarted","Data":"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c"} Oct 13 08:10:27 crc kubenswrapper[4664]: I1013 08:10:27.178189 4664 generic.go:334] "Generic (PLEG): container finished" podID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerID="30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c" exitCode=0 Oct 13 08:10:27 crc kubenswrapper[4664]: I1013 08:10:27.178296 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerDied","Data":"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c"} Oct 13 08:10:27 crc kubenswrapper[4664]: I1013 08:10:27.182856 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:10:28 crc kubenswrapper[4664]: I1013 08:10:28.225150 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerStarted","Data":"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de"} Oct 13 08:10:28 crc kubenswrapper[4664]: I1013 08:10:28.811775 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:10:28 crc kubenswrapper[4664]: I1013 08:10:28.811881 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:10:29 crc kubenswrapper[4664]: I1013 08:10:29.238836 4664 generic.go:334] "Generic (PLEG): container finished" podID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerID="8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de" exitCode=0 Oct 13 08:10:29 crc kubenswrapper[4664]: I1013 08:10:29.238891 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerDied","Data":"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de"} Oct 13 08:10:30 crc kubenswrapper[4664]: I1013 08:10:30.254500 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" 
event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerStarted","Data":"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb"} Oct 13 08:10:30 crc kubenswrapper[4664]: I1013 08:10:30.283401 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pj9w2" podStartSLOduration=4.781878435 podStartE2EDuration="7.283377981s" podCreationTimestamp="2025-10-13 08:10:23 +0000 UTC" firstStartedPulling="2025-10-13 08:10:27.182561505 +0000 UTC m=+5034.870006697" lastFinishedPulling="2025-10-13 08:10:29.684061051 +0000 UTC m=+5037.371506243" observedRunningTime="2025-10-13 08:10:30.273494874 +0000 UTC m=+5037.960940086" watchObservedRunningTime="2025-10-13 08:10:30.283377981 +0000 UTC m=+5037.970823183" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.201280 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.204157 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.223110 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.327860 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx7hp\" (UniqueName: \"kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.327931 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.328418 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.430707 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.430915 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx7hp\" (UniqueName: \"kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.430956 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.431414 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.431276 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.463104 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx7hp\" (UniqueName: \"kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp\") pod \"redhat-operators-w84mx\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:33 crc kubenswrapper[4664]: I1013 08:10:33.528433 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:34 crc kubenswrapper[4664]: I1013 08:10:34.356950 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:34 crc kubenswrapper[4664]: I1013 08:10:34.357571 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:34 crc kubenswrapper[4664]: I1013 08:10:34.402092 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:10:34 crc kubenswrapper[4664]: I1013 08:10:34.445502 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:35 crc kubenswrapper[4664]: I1013 08:10:35.299254 4664 generic.go:334] "Generic (PLEG): container finished" podID="31505d65-4174-45de-9cfc-676a8891a571" containerID="2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9" exitCode=0 Oct 13 08:10:35 crc kubenswrapper[4664]: I1013 08:10:35.299355 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerDied","Data":"2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9"} Oct 13 08:10:35 crc kubenswrapper[4664]: I1013 08:10:35.299636 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerStarted","Data":"8442934f6fc33435e092eedc4aeacc06d6a1e231800cec94769c71b8681f2348"} Oct 13 08:10:35 crc kubenswrapper[4664]: I1013 08:10:35.933659 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:36 crc kubenswrapper[4664]: I1013 08:10:36.790697 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 
08:10:37 crc kubenswrapper[4664]: I1013 08:10:37.320000 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerStarted","Data":"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168"} Oct 13 08:10:37 crc kubenswrapper[4664]: I1013 08:10:37.320153 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pj9w2" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="registry-server" containerID="cri-o://3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb" gracePeriod=2 Oct 13 08:10:37 crc kubenswrapper[4664]: I1013 08:10:37.984385 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.032922 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities\") pod \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.033309 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjllk\" (UniqueName: \"kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk\") pod \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.033373 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content\") pod \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\" (UID: \"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0\") " Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.033447 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities" (OuterVolumeSpecName: "utilities") pod "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" (UID: "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.033817 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.040072 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk" (OuterVolumeSpecName: "kube-api-access-rjllk") pod "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" (UID: "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0"). InnerVolumeSpecName "kube-api-access-rjllk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.045668 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" (UID: "eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.136037 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjllk\" (UniqueName: \"kubernetes.io/projected/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-kube-api-access-rjllk\") on node \"crc\" DevicePath \"\"" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.136072 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.335770 4664 generic.go:334] "Generic (PLEG): container finished" podID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerID="3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb" exitCode=0 Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.335855 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerDied","Data":"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb"} Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.335892 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pj9w2" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.335910 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pj9w2" event={"ID":"eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0","Type":"ContainerDied","Data":"4d25e7fc08234afa0aa7360f879d5265b5068d965a989d95870a007837671ab0"} Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.335978 4664 scope.go:117] "RemoveContainer" containerID="3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.392670 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.397761 4664 scope.go:117] "RemoveContainer" containerID="8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.402790 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pj9w2"] Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.420851 4664 scope.go:117] "RemoveContainer" containerID="30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.469362 4664 scope.go:117] "RemoveContainer" containerID="3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb" Oct 13 08:10:38 crc kubenswrapper[4664]: E1013 08:10:38.469981 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb\": container with ID starting with 3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb not found: ID does not exist" containerID="3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.470033 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb"} err="failed to get container status 
\"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb\": rpc error: code = NotFound desc = could not find container \"3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb\": container with ID starting with 3d51de2e399a71bdb38c311770bf9713b53a3cf8346de2ed871661fe45f02deb not found: ID does not exist" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.470078 4664 scope.go:117] "RemoveContainer" containerID="8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de" Oct 13 08:10:38 crc kubenswrapper[4664]: E1013 08:10:38.470482 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de\": container with ID starting with 8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de not found: ID does not exist" containerID="8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.470550 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de"} err="failed to get container status \"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de\": rpc error: code = NotFound desc = could not find container \"8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de\": container with ID starting with 8aed47b4b62517ebfe3d4b282c4cdf7de3b7743bfefe7ee5c6e4959138de46de not found: ID does not exist" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.470577 4664 scope.go:117] "RemoveContainer" containerID="30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c" Oct 13 08:10:38 crc kubenswrapper[4664]: E1013 08:10:38.473887 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c\": container with ID starting with 30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c not found: ID does not exist" containerID="30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c" Oct 13 08:10:38 crc kubenswrapper[4664]: I1013 08:10:38.473923 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c"} err="failed to get container status \"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c\": rpc error: code = NotFound desc = could not find container \"30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c\": container with ID starting with 30cd5f4c655ddc17f6b08af345cc583d76c6f93adc7e15257ae66e9de7909f7c not found: ID does not exist" Oct 13 08:10:39 crc kubenswrapper[4664]: I1013 08:10:39.064467 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" path="/var/lib/kubelet/pods/eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0/volumes" Oct 13 08:10:41 crc kubenswrapper[4664]: I1013 08:10:41.376074 4664 generic.go:334] "Generic (PLEG): container finished" podID="31505d65-4174-45de-9cfc-676a8891a571" containerID="52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168" exitCode=0 Oct 13 08:10:41 crc kubenswrapper[4664]: I1013 08:10:41.376178 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" 
event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerDied","Data":"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168"} Oct 13 08:10:42 crc kubenswrapper[4664]: I1013 08:10:42.394543 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerStarted","Data":"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5"} Oct 13 08:10:42 crc kubenswrapper[4664]: I1013 08:10:42.431907 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w84mx" podStartSLOduration=2.661115764 podStartE2EDuration="9.43186895s" podCreationTimestamp="2025-10-13 08:10:33 +0000 UTC" firstStartedPulling="2025-10-13 08:10:35.303035362 +0000 UTC m=+5042.990480594" lastFinishedPulling="2025-10-13 08:10:42.073788568 +0000 UTC m=+5049.761233780" observedRunningTime="2025-10-13 08:10:42.427751848 +0000 UTC m=+5050.115197060" watchObservedRunningTime="2025-10-13 08:10:42.43186895 +0000 UTC m=+5050.119314172" Oct 13 08:10:43 crc kubenswrapper[4664]: I1013 08:10:43.529648 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:43 crc kubenswrapper[4664]: I1013 08:10:43.529718 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:10:44 crc kubenswrapper[4664]: I1013 08:10:44.586560 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w84mx" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" probeResult="failure" output=< Oct 13 08:10:44 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:10:44 crc kubenswrapper[4664]: > Oct 13 08:10:54 crc kubenswrapper[4664]: I1013 08:10:54.578486 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w84mx" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" probeResult="failure" output=< Oct 13 08:10:54 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:10:54 crc kubenswrapper[4664]: > Oct 13 08:10:58 crc kubenswrapper[4664]: I1013 08:10:58.812270 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:10:58 crc kubenswrapper[4664]: I1013 08:10:58.812627 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:10:58 crc kubenswrapper[4664]: I1013 08:10:58.812684 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:10:58 crc kubenswrapper[4664]: I1013 08:10:58.813861 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e"} 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:10:58 crc kubenswrapper[4664]: I1013 08:10:58.813933 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e" gracePeriod=600 Oct 13 08:10:59 crc kubenswrapper[4664]: I1013 08:10:59.584538 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e" exitCode=0 Oct 13 08:10:59 crc kubenswrapper[4664]: I1013 08:10:59.584621 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e"} Oct 13 08:10:59 crc kubenswrapper[4664]: I1013 08:10:59.585151 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191"} Oct 13 08:10:59 crc kubenswrapper[4664]: I1013 08:10:59.585174 4664 scope.go:117] "RemoveContainer" containerID="e3ebe4c9efa95effd348598f2936dc53a1c31922afefa07b55efc7d2d960326e" Oct 13 08:11:04 crc kubenswrapper[4664]: I1013 08:11:04.590380 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w84mx" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" probeResult="failure" output=< Oct 13 08:11:04 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:11:04 crc kubenswrapper[4664]: > Oct 13 08:11:14 crc kubenswrapper[4664]: I1013 08:11:14.616199 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w84mx" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" probeResult="failure" output=< Oct 13 08:11:14 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:11:14 crc kubenswrapper[4664]: > Oct 13 08:11:23 crc kubenswrapper[4664]: I1013 08:11:23.607881 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:11:23 crc kubenswrapper[4664]: I1013 08:11:23.665288 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:11:23 crc kubenswrapper[4664]: I1013 08:11:23.848874 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:11:24 crc kubenswrapper[4664]: I1013 08:11:24.818371 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w84mx" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" containerID="cri-o://3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5" gracePeriod=2 Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.733898 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.824033 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx7hp\" (UniqueName: \"kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp\") pod \"31505d65-4174-45de-9cfc-676a8891a571\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.824178 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content\") pod \"31505d65-4174-45de-9cfc-676a8891a571\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.824213 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities\") pod \"31505d65-4174-45de-9cfc-676a8891a571\" (UID: \"31505d65-4174-45de-9cfc-676a8891a571\") " Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.825718 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities" (OuterVolumeSpecName: "utilities") pod "31505d65-4174-45de-9cfc-676a8891a571" (UID: "31505d65-4174-45de-9cfc-676a8891a571"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.834161 4664 generic.go:334] "Generic (PLEG): container finished" podID="31505d65-4174-45de-9cfc-676a8891a571" containerID="3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5" exitCode=0 Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.834216 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerDied","Data":"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5"} Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.834260 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w84mx" event={"ID":"31505d65-4174-45de-9cfc-676a8891a571","Type":"ContainerDied","Data":"8442934f6fc33435e092eedc4aeacc06d6a1e231800cec94769c71b8681f2348"} Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.834298 4664 scope.go:117] "RemoveContainer" containerID="3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.834307 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w84mx" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.859929 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp" (OuterVolumeSpecName: "kube-api-access-dx7hp") pod "31505d65-4174-45de-9cfc-676a8891a571" (UID: "31505d65-4174-45de-9cfc-676a8891a571"). InnerVolumeSpecName "kube-api-access-dx7hp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.898189 4664 scope.go:117] "RemoveContainer" containerID="52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.911530 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31505d65-4174-45de-9cfc-676a8891a571" (UID: "31505d65-4174-45de-9cfc-676a8891a571"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.920768 4664 scope.go:117] "RemoveContainer" containerID="2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.927387 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx7hp\" (UniqueName: \"kubernetes.io/projected/31505d65-4174-45de-9cfc-676a8891a571-kube-api-access-dx7hp\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.927780 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.927902 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31505d65-4174-45de-9cfc-676a8891a571-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.993934 4664 scope.go:117] "RemoveContainer" containerID="3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5" Oct 13 08:11:25 crc kubenswrapper[4664]: E1013 08:11:25.996993 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5\": container with ID starting with 3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5 not found: ID does not exist" containerID="3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.997059 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5"} err="failed to get container status \"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5\": rpc error: code = NotFound desc = could not find container \"3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5\": container with ID starting with 3c4e4b001d4271b269c7ec66c23d1a7806e5c34296f1aa8904ad3d192e3e90f5 not found: ID does not exist" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.997086 4664 scope.go:117] "RemoveContainer" containerID="52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168" Oct 13 08:11:25 crc kubenswrapper[4664]: E1013 08:11:25.999009 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168\": container with ID starting with 52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168 not found: ID does not exist" containerID="52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168" Oct 
13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.999056 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168"} err="failed to get container status \"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168\": rpc error: code = NotFound desc = could not find container \"52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168\": container with ID starting with 52fe6593ad9730ac902701e9d92be19d984dcb50064251a27ee4f0b4178ac168 not found: ID does not exist" Oct 13 08:11:25 crc kubenswrapper[4664]: I1013 08:11:25.999086 4664 scope.go:117] "RemoveContainer" containerID="2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9" Oct 13 08:11:26 crc kubenswrapper[4664]: E1013 08:11:26.000551 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9\": container with ID starting with 2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9 not found: ID does not exist" containerID="2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9" Oct 13 08:11:26 crc kubenswrapper[4664]: I1013 08:11:26.000599 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9"} err="failed to get container status \"2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9\": rpc error: code = NotFound desc = could not find container \"2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9\": container with ID starting with 2471549ac8b7616b7221c9302537118560dffef2ae3d2716b8ac536122a179a9 not found: ID does not exist" Oct 13 08:11:26 crc kubenswrapper[4664]: I1013 08:11:26.176486 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:11:26 crc kubenswrapper[4664]: I1013 08:11:26.195690 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w84mx"] Oct 13 08:11:27 crc kubenswrapper[4664]: I1013 08:11:27.058007 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31505d65-4174-45de-9cfc-676a8891a571" path="/var/lib/kubelet/pods/31505d65-4174-45de-9cfc-676a8891a571/volumes" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.273234 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274159 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="extract-content" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274173 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="extract-content" Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274189 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="extract-content" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274195 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="extract-content" Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274214 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31505d65-4174-45de-9cfc-676a8891a571" 
containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274220 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274235 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="extract-utilities" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274241 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="extract-utilities" Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274257 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="extract-utilities" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274263 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="extract-utilities" Oct 13 08:11:29 crc kubenswrapper[4664]: E1013 08:11:29.274280 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274286 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274476 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="31505d65-4174-45de-9cfc-676a8891a571" containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.274506 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaf27b2a-2e9b-48e1-a2a3-9efb33d780e0" containerName="registry-server" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.275852 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.284440 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.411234 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtkt2\" (UniqueName: \"kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.411322 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.411403 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.513559 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtkt2\" (UniqueName: \"kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.513659 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.513746 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.514356 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.514517 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.535645 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dtkt2\" (UniqueName: \"kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2\") pod \"community-operators-88jxq\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:29 crc kubenswrapper[4664]: I1013 08:11:29.634038 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:30 crc kubenswrapper[4664]: I1013 08:11:30.222755 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:30 crc kubenswrapper[4664]: I1013 08:11:30.888552 4664 generic.go:334] "Generic (PLEG): container finished" podID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerID="fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e" exitCode=0 Oct 13 08:11:30 crc kubenswrapper[4664]: I1013 08:11:30.888632 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerDied","Data":"fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e"} Oct 13 08:11:30 crc kubenswrapper[4664]: I1013 08:11:30.888667 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerStarted","Data":"651b7e81461d2ae921254eddb5dcd03998635be4d5cb486cacc1f13fcd51d45a"} Oct 13 08:11:32 crc kubenswrapper[4664]: I1013 08:11:32.922883 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerStarted","Data":"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de"} Oct 13 08:11:33 crc kubenswrapper[4664]: I1013 08:11:33.933079 4664 generic.go:334] "Generic (PLEG): container finished" podID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerID="26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de" exitCode=0 Oct 13 08:11:33 crc kubenswrapper[4664]: I1013 08:11:33.933147 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerDied","Data":"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de"} Oct 13 08:11:34 crc kubenswrapper[4664]: I1013 08:11:34.945850 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerStarted","Data":"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f"} Oct 13 08:11:34 crc kubenswrapper[4664]: I1013 08:11:34.969078 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-88jxq" podStartSLOduration=2.243702465 podStartE2EDuration="5.969057664s" podCreationTimestamp="2025-10-13 08:11:29 +0000 UTC" firstStartedPulling="2025-10-13 08:11:30.891290607 +0000 UTC m=+5098.578735809" lastFinishedPulling="2025-10-13 08:11:34.616645816 +0000 UTC m=+5102.304091008" observedRunningTime="2025-10-13 08:11:34.964378777 +0000 UTC m=+5102.651823969" watchObservedRunningTime="2025-10-13 08:11:34.969057664 +0000 UTC m=+5102.656502866" Oct 13 08:11:39 crc kubenswrapper[4664]: I1013 08:11:39.634482 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:39 crc kubenswrapper[4664]: I1013 08:11:39.635333 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:40 crc kubenswrapper[4664]: I1013 08:11:40.924039 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-88jxq" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="registry-server" probeResult="failure" output=< Oct 13 08:11:40 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:11:40 crc kubenswrapper[4664]: > Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.472157 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.487699 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.492440 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.600036 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c86bf\" (UniqueName: \"kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.600159 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.600206 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.682002 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.701419 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.701492 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.701550 4664 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-c86bf\" (UniqueName: \"kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.702247 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.703697 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.732711 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c86bf\" (UniqueName: \"kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf\") pod \"certified-operators-qg9dc\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.746407 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:49 crc kubenswrapper[4664]: I1013 08:11:49.836244 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:50 crc kubenswrapper[4664]: I1013 08:11:50.421952 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:11:51 crc kubenswrapper[4664]: I1013 08:11:51.101040 4664 generic.go:334] "Generic (PLEG): container finished" podID="489ef556-fc74-4580-9178-895c8aed9378" containerID="5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65" exitCode=0 Oct 13 08:11:51 crc kubenswrapper[4664]: I1013 08:11:51.101132 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerDied","Data":"5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65"} Oct 13 08:11:51 crc kubenswrapper[4664]: I1013 08:11:51.102519 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerStarted","Data":"19ea7e9453c59fa65380a1c9fb4035bc51c8031de64800f851097fb13e0220b1"} Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.034766 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.035465 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-88jxq" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="registry-server" containerID="cri-o://0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f" gracePeriod=2 Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.645560 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.775595 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content\") pod \"1052e1cb-e170-4313-b25d-73b1ea27af0b\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.775742 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities\") pod \"1052e1cb-e170-4313-b25d-73b1ea27af0b\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.775823 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtkt2\" (UniqueName: \"kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2\") pod \"1052e1cb-e170-4313-b25d-73b1ea27af0b\" (UID: \"1052e1cb-e170-4313-b25d-73b1ea27af0b\") " Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.776574 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities" (OuterVolumeSpecName: "utilities") pod "1052e1cb-e170-4313-b25d-73b1ea27af0b" (UID: "1052e1cb-e170-4313-b25d-73b1ea27af0b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.781318 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2" (OuterVolumeSpecName: "kube-api-access-dtkt2") pod "1052e1cb-e170-4313-b25d-73b1ea27af0b" (UID: "1052e1cb-e170-4313-b25d-73b1ea27af0b"). InnerVolumeSpecName "kube-api-access-dtkt2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.837578 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1052e1cb-e170-4313-b25d-73b1ea27af0b" (UID: "1052e1cb-e170-4313-b25d-73b1ea27af0b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.878115 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.878172 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1052e1cb-e170-4313-b25d-73b1ea27af0b-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:52 crc kubenswrapper[4664]: I1013 08:11:52.878186 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtkt2\" (UniqueName: \"kubernetes.io/projected/1052e1cb-e170-4313-b25d-73b1ea27af0b-kube-api-access-dtkt2\") on node \"crc\" DevicePath \"\"" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.127825 4664 generic.go:334] "Generic (PLEG): container finished" podID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerID="0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f" exitCode=0 Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.127989 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-88jxq" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.128752 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerDied","Data":"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f"} Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.128827 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-88jxq" event={"ID":"1052e1cb-e170-4313-b25d-73b1ea27af0b","Type":"ContainerDied","Data":"651b7e81461d2ae921254eddb5dcd03998635be4d5cb486cacc1f13fcd51d45a"} Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.128855 4664 scope.go:117] "RemoveContainer" containerID="0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.134139 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerStarted","Data":"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c"} Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.174851 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.181784 4664 scope.go:117] "RemoveContainer" containerID="26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.183438 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-88jxq"] Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.229692 4664 scope.go:117] "RemoveContainer" containerID="fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.280537 4664 scope.go:117] "RemoveContainer" containerID="0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f" Oct 13 08:11:53 crc kubenswrapper[4664]: E1013 08:11:53.281232 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f\": container with ID starting with 0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f not found: ID does not exist" containerID="0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.281266 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f"} err="failed to get container status \"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f\": rpc error: code = NotFound desc = could not find container \"0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f\": container with ID starting with 0f8927e76bd01c6c6cc624ed4f9c395833f44c9e8ea7659150f66b7288f62d0f not found: ID does not exist" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.281286 4664 scope.go:117] "RemoveContainer" containerID="26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de" Oct 13 08:11:53 crc kubenswrapper[4664]: E1013 08:11:53.281532 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de\": container with ID starting with 26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de not found: ID does not exist" containerID="26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.281561 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de"} err="failed to get container status \"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de\": rpc error: code = NotFound desc = could not find container \"26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de\": container with ID starting with 26dfd9f2c4f1188cc8b3ac031668f609dc024ffcf5c3cf9c5288752dce21d2de not found: ID does not exist" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.281574 4664 scope.go:117] "RemoveContainer" containerID="fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e" Oct 13 08:11:53 crc kubenswrapper[4664]: E1013 08:11:53.281866 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e\": container with ID starting with fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e not found: ID does not exist" containerID="fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e" Oct 13 08:11:53 crc kubenswrapper[4664]: I1013 08:11:53.281953 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e"} err="failed to get container status \"fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e\": rpc error: code = NotFound desc = could not find container \"fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e\": container with ID starting with fda39a47341bdcbff557fca41c451f4edd3a589b8103109aeb342b15cd750f8e not found: ID does not exist" Oct 13 08:11:54 crc kubenswrapper[4664]: I1013 08:11:54.144170 4664 generic.go:334] "Generic (PLEG): container finished" podID="489ef556-fc74-4580-9178-895c8aed9378" containerID="1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c" exitCode=0 Oct 13 08:11:54 crc kubenswrapper[4664]: I1013 08:11:54.144260 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerDied","Data":"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c"} Oct 13 08:11:55 crc kubenswrapper[4664]: I1013 08:11:55.061271 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" path="/var/lib/kubelet/pods/1052e1cb-e170-4313-b25d-73b1ea27af0b/volumes" Oct 13 08:11:55 crc kubenswrapper[4664]: I1013 08:11:55.163986 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerStarted","Data":"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6"} Oct 13 08:11:55 crc kubenswrapper[4664]: I1013 08:11:55.193409 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qg9dc" podStartSLOduration=2.568368276 podStartE2EDuration="6.19339356s" podCreationTimestamp="2025-10-13 08:11:49 
+0000 UTC" firstStartedPulling="2025-10-13 08:11:51.103557236 +0000 UTC m=+5118.791002428" lastFinishedPulling="2025-10-13 08:11:54.72858251 +0000 UTC m=+5122.416027712" observedRunningTime="2025-10-13 08:11:55.192940758 +0000 UTC m=+5122.880385980" watchObservedRunningTime="2025-10-13 08:11:55.19339356 +0000 UTC m=+5122.880838752" Oct 13 08:11:59 crc kubenswrapper[4664]: I1013 08:11:59.836816 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:59 crc kubenswrapper[4664]: I1013 08:11:59.837136 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:11:59 crc kubenswrapper[4664]: I1013 08:11:59.883724 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:12:00 crc kubenswrapper[4664]: I1013 08:12:00.269574 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:12:00 crc kubenswrapper[4664]: I1013 08:12:00.461955 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.237856 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qg9dc" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="registry-server" containerID="cri-o://bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6" gracePeriod=2 Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.782190 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.875240 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities\") pod \"489ef556-fc74-4580-9178-895c8aed9378\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.875403 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content\") pod \"489ef556-fc74-4580-9178-895c8aed9378\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.875455 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c86bf\" (UniqueName: \"kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf\") pod \"489ef556-fc74-4580-9178-895c8aed9378\" (UID: \"489ef556-fc74-4580-9178-895c8aed9378\") " Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.876125 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities" (OuterVolumeSpecName: "utilities") pod "489ef556-fc74-4580-9178-895c8aed9378" (UID: "489ef556-fc74-4580-9178-895c8aed9378"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.881167 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf" (OuterVolumeSpecName: "kube-api-access-c86bf") pod "489ef556-fc74-4580-9178-895c8aed9378" (UID: "489ef556-fc74-4580-9178-895c8aed9378"). InnerVolumeSpecName "kube-api-access-c86bf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.932720 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "489ef556-fc74-4580-9178-895c8aed9378" (UID: "489ef556-fc74-4580-9178-895c8aed9378"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.977699 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.977750 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c86bf\" (UniqueName: \"kubernetes.io/projected/489ef556-fc74-4580-9178-895c8aed9378-kube-api-access-c86bf\") on node \"crc\" DevicePath \"\"" Oct 13 08:12:02 crc kubenswrapper[4664]: I1013 08:12:02.977763 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/489ef556-fc74-4580-9178-895c8aed9378-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.248201 4664 generic.go:334] "Generic (PLEG): container finished" podID="489ef556-fc74-4580-9178-895c8aed9378" containerID="bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6" exitCode=0 Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.248245 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerDied","Data":"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6"} Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.248259 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qg9dc" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.248283 4664 scope.go:117] "RemoveContainer" containerID="bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.248272 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg9dc" event={"ID":"489ef556-fc74-4580-9178-895c8aed9378","Type":"ContainerDied","Data":"19ea7e9453c59fa65380a1c9fb4035bc51c8031de64800f851097fb13e0220b1"} Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.278185 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.286887 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qg9dc"] Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.293249 4664 scope.go:117] "RemoveContainer" containerID="1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.313484 4664 scope.go:117] "RemoveContainer" containerID="5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.370122 4664 scope.go:117] "RemoveContainer" containerID="bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6" Oct 13 08:12:03 crc kubenswrapper[4664]: E1013 08:12:03.370557 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6\": container with ID starting with bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6 not found: ID does not exist" containerID="bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.370590 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6"} err="failed to get container status \"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6\": rpc error: code = NotFound desc = could not find container \"bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6\": container with ID starting with bf74e807c559e77a9d7bb2c4b2dc9b6e28033e66553e6111ea975fd19299d0e6 not found: ID does not exist" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.370611 4664 scope.go:117] "RemoveContainer" containerID="1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c" Oct 13 08:12:03 crc kubenswrapper[4664]: E1013 08:12:03.371087 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c\": container with ID starting with 1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c not found: ID does not exist" containerID="1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.371110 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c"} err="failed to get container status \"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c\": rpc error: code = NotFound desc = could not find 
container \"1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c\": container with ID starting with 1dfc55e52785b243df8bd85aa46e6135449e761124d6f1a83878715ec935fa1c not found: ID does not exist" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.371122 4664 scope.go:117] "RemoveContainer" containerID="5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65" Oct 13 08:12:03 crc kubenswrapper[4664]: E1013 08:12:03.371500 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65\": container with ID starting with 5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65 not found: ID does not exist" containerID="5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65" Oct 13 08:12:03 crc kubenswrapper[4664]: I1013 08:12:03.371523 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65"} err="failed to get container status \"5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65\": rpc error: code = NotFound desc = could not find container \"5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65\": container with ID starting with 5acf1d8143382ae2f475c8ceff1ff9123e14317fad610f39ba5d386252538b65 not found: ID does not exist" Oct 13 08:12:05 crc kubenswrapper[4664]: I1013 08:12:05.057110 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="489ef556-fc74-4580-9178-895c8aed9378" path="/var/lib/kubelet/pods/489ef556-fc74-4580-9178-895c8aed9378/volumes" Oct 13 08:13:28 crc kubenswrapper[4664]: I1013 08:13:28.812183 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:13:28 crc kubenswrapper[4664]: I1013 08:13:28.813182 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:13:58 crc kubenswrapper[4664]: I1013 08:13:58.811635 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:13:58 crc kubenswrapper[4664]: I1013 08:13:58.812190 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:14:28 crc kubenswrapper[4664]: I1013 08:14:28.812423 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 
08:14:28 crc kubenswrapper[4664]: I1013 08:14:28.813101 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:14:28 crc kubenswrapper[4664]: I1013 08:14:28.813150 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:14:28 crc kubenswrapper[4664]: I1013 08:14:28.813833 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:14:28 crc kubenswrapper[4664]: I1013 08:14:28.813912 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" gracePeriod=600 Oct 13 08:14:29 crc kubenswrapper[4664]: E1013 08:14:29.507833 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:14:29 crc kubenswrapper[4664]: I1013 08:14:29.777335 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" exitCode=0 Oct 13 08:14:29 crc kubenswrapper[4664]: I1013 08:14:29.777384 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191"} Oct 13 08:14:29 crc kubenswrapper[4664]: I1013 08:14:29.777418 4664 scope.go:117] "RemoveContainer" containerID="b2e770bf3a3cd33389d514b5a270678710030e263ae285012a3e8b566241561e" Oct 13 08:14:29 crc kubenswrapper[4664]: I1013 08:14:29.778101 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:14:29 crc kubenswrapper[4664]: E1013 08:14:29.778362 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:14:43 crc kubenswrapper[4664]: I1013 08:14:43.053969 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:14:43 crc 
kubenswrapper[4664]: E1013 08:14:43.056471 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:14:54 crc kubenswrapper[4664]: I1013 08:14:54.047283 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:14:54 crc kubenswrapper[4664]: E1013 08:14:54.048252 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.163358 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"] Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164713 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="registry-server" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164740 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="registry-server" Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164764 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="extract-content" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164777 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="extract-content" Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164841 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="extract-utilities" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164856 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="extract-utilities" Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164886 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="extract-content" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164898 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="extract-content" Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164927 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="registry-server" Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164939 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="registry-server" Oct 13 08:15:00 crc kubenswrapper[4664]: E1013 08:15:00.164976 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="extract-utilities" Oct 13 08:15:00 crc 
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.164988 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="extract-utilities"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.165409 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="489ef556-fc74-4580-9178-895c8aed9378" containerName="registry-server"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.165456 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1052e1cb-e170-4313-b25d-73b1ea27af0b" containerName="registry-server"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.167161 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.171432 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"]
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.177769 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.207142 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.268298 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.268393 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss9zc\" (UniqueName: \"kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.268463 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.370776 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.370926 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss9zc\" (UniqueName: \"kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.371010 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.371759 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.380179 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.388179 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss9zc\" (UniqueName: \"kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc\") pod \"collect-profiles-29339055-xq5qm\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:00 crc kubenswrapper[4664]: I1013 08:15:00.504175 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:01 crc kubenswrapper[4664]: I1013 08:15:01.034706 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"]
Oct 13 08:15:01 crc kubenswrapper[4664]: I1013 08:15:01.097457 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm" event={"ID":"0d4c6351-6377-499a-88c9-d9e24c5dcb82","Type":"ContainerStarted","Data":"196abebf552d13e1e0d1b0d3b9ab1980a931e587f6d8251a70e259d065f205c9"}
Oct 13 08:15:02 crc kubenswrapper[4664]: I1013 08:15:02.105845 4664 generic.go:334] "Generic (PLEG): container finished" podID="0d4c6351-6377-499a-88c9-d9e24c5dcb82" containerID="a5b42ea02ac8b153ff2a58b5d0e458a558e5398623bcd42d3c5150091c6d742e" exitCode=0
Oct 13 08:15:02 crc kubenswrapper[4664]: I1013 08:15:02.105914 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm" event={"ID":"0d4c6351-6377-499a-88c9-d9e24c5dcb82","Type":"ContainerDied","Data":"a5b42ea02ac8b153ff2a58b5d0e458a558e5398623bcd42d3c5150091c6d742e"}
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.513911 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.635999 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume\") pod \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") "
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.636197 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume\") pod \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") "
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.636240 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss9zc\" (UniqueName: \"kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc\") pod \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\" (UID: \"0d4c6351-6377-499a-88c9-d9e24c5dcb82\") "
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.636682 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume" (OuterVolumeSpecName: "config-volume") pod "0d4c6351-6377-499a-88c9-d9e24c5dcb82" (UID: "0d4c6351-6377-499a-88c9-d9e24c5dcb82"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.637037 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4c6351-6377-499a-88c9-d9e24c5dcb82-config-volume\") on node \"crc\" DevicePath \"\""
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.642187 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0d4c6351-6377-499a-88c9-d9e24c5dcb82" (UID: "0d4c6351-6377-499a-88c9-d9e24c5dcb82"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.642773 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc" (OuterVolumeSpecName: "kube-api-access-ss9zc") pod "0d4c6351-6377-499a-88c9-d9e24c5dcb82" (UID: "0d4c6351-6377-499a-88c9-d9e24c5dcb82"). InnerVolumeSpecName "kube-api-access-ss9zc". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.739061 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4c6351-6377-499a-88c9-d9e24c5dcb82-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 08:15:03 crc kubenswrapper[4664]: I1013 08:15:03.739337 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss9zc\" (UniqueName: \"kubernetes.io/projected/0d4c6351-6377-499a-88c9-d9e24c5dcb82-kube-api-access-ss9zc\") on node \"crc\" DevicePath \"\"" Oct 13 08:15:04 crc kubenswrapper[4664]: I1013 08:15:04.127377 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm" event={"ID":"0d4c6351-6377-499a-88c9-d9e24c5dcb82","Type":"ContainerDied","Data":"196abebf552d13e1e0d1b0d3b9ab1980a931e587f6d8251a70e259d065f205c9"} Oct 13 08:15:04 crc kubenswrapper[4664]: I1013 08:15:04.127420 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="196abebf552d13e1e0d1b0d3b9ab1980a931e587f6d8251a70e259d065f205c9" Oct 13 08:15:04 crc kubenswrapper[4664]: I1013 08:15:04.127483 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm" Oct 13 08:15:04 crc kubenswrapper[4664]: I1013 08:15:04.598339 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb"] Oct 13 08:15:04 crc kubenswrapper[4664]: I1013 08:15:04.606882 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339010-mkrsb"] Oct 13 08:15:05 crc kubenswrapper[4664]: I1013 08:15:05.061607 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f363fe6-c902-4860-a3d9-4e1d39803e17" path="/var/lib/kubelet/pods/5f363fe6-c902-4860-a3d9-4e1d39803e17/volumes" Oct 13 08:15:06 crc kubenswrapper[4664]: I1013 08:15:06.048210 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:15:06 crc kubenswrapper[4664]: E1013 08:15:06.048444 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:15:20 crc kubenswrapper[4664]: I1013 08:15:20.046763 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:15:20 crc kubenswrapper[4664]: E1013 08:15:20.047466 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:15:31 crc kubenswrapper[4664]: I1013 08:15:31.048048 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 
08:15:31 crc kubenswrapper[4664]: E1013 08:15:31.049297 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:15:42 crc kubenswrapper[4664]: I1013 08:15:42.633110 4664 scope.go:117] "RemoveContainer" containerID="89e0305340ea2ea3fa70058fd26333b323f2f38aac8903c75ff514000f2df269" Oct 13 08:15:44 crc kubenswrapper[4664]: I1013 08:15:44.047276 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:15:44 crc kubenswrapper[4664]: E1013 08:15:44.047895 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:15:59 crc kubenswrapper[4664]: I1013 08:15:59.047405 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:15:59 crc kubenswrapper[4664]: E1013 08:15:59.048368 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:16:14 crc kubenswrapper[4664]: I1013 08:16:14.047405 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:16:14 crc kubenswrapper[4664]: E1013 08:16:14.048255 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:16:25 crc kubenswrapper[4664]: I1013 08:16:25.047795 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:16:25 crc kubenswrapper[4664]: E1013 08:16:25.048655 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:16:39 crc kubenswrapper[4664]: I1013 08:16:39.047997 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:16:39 crc 
kubenswrapper[4664]: E1013 08:16:39.049211 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:16:50 crc kubenswrapper[4664]: I1013 08:16:50.047354 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:16:50 crc kubenswrapper[4664]: E1013 08:16:50.047934 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:17:02 crc kubenswrapper[4664]: I1013 08:17:02.047630 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:17:02 crc kubenswrapper[4664]: E1013 08:17:02.048673 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:17:17 crc kubenswrapper[4664]: I1013 08:17:17.047255 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:17:17 crc kubenswrapper[4664]: E1013 08:17:17.048129 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:17:28 crc kubenswrapper[4664]: I1013 08:17:28.047692 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:17:28 crc kubenswrapper[4664]: E1013 08:17:28.048377 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:17:42 crc kubenswrapper[4664]: I1013 08:17:42.047394 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:17:42 crc kubenswrapper[4664]: E1013 08:17:42.048400 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:17:56 crc kubenswrapper[4664]: I1013 08:17:56.047052 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:17:56 crc kubenswrapper[4664]: E1013 08:17:56.049672 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:18:09 crc kubenswrapper[4664]: I1013 08:18:09.047519 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:18:09 crc kubenswrapper[4664]: E1013 08:18:09.048350 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:18:22 crc kubenswrapper[4664]: I1013 08:18:22.047504 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:18:22 crc kubenswrapper[4664]: E1013 08:18:22.048563 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:18:36 crc kubenswrapper[4664]: I1013 08:18:36.047287 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:18:36 crc kubenswrapper[4664]: E1013 08:18:36.047952 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:18:50 crc kubenswrapper[4664]: I1013 08:18:50.047527 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:18:50 crc kubenswrapper[4664]: E1013 08:18:50.048361 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:19:01 crc kubenswrapper[4664]: I1013 08:19:01.047310 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:19:01 crc kubenswrapper[4664]: E1013 08:19:01.048749 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:19:15 crc kubenswrapper[4664]: I1013 08:19:15.047628 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:19:15 crc kubenswrapper[4664]: E1013 08:19:15.048351 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:19:28 crc kubenswrapper[4664]: I1013 08:19:28.047455 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:19:28 crc kubenswrapper[4664]: E1013 08:19:28.048335 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:19:42 crc kubenswrapper[4664]: I1013 08:19:42.047897 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:19:42 crc kubenswrapper[4664]: I1013 08:19:42.979111 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53"} Oct 13 08:21:14 crc kubenswrapper[4664]: I1013 08:21:14.976695 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"] Oct 13 08:21:14 crc kubenswrapper[4664]: E1013 08:21:14.977815 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4c6351-6377-499a-88c9-d9e24c5dcb82" containerName="collect-profiles" Oct 13 08:21:14 crc kubenswrapper[4664]: I1013 08:21:14.977831 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4c6351-6377-499a-88c9-d9e24c5dcb82" containerName="collect-profiles" Oct 13 08:21:14 crc kubenswrapper[4664]: I1013 08:21:14.978086 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4c6351-6377-499a-88c9-d9e24c5dcb82" containerName="collect-profiles" Oct 13 08:21:14 crc kubenswrapper[4664]: I1013 08:21:14.980655 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:14 crc kubenswrapper[4664]: I1013 08:21:14.991739 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"] Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.010918 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.011217 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.011286 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6nb7\" (UniqueName: \"kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.112371 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.112427 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.112507 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6nb7\" (UniqueName: \"kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.112856 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.113570 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.145674 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-z6nb7\" (UniqueName: \"kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7\") pod \"redhat-marketplace-mf65w\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.310948 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.830013 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"] Oct 13 08:21:15 crc kubenswrapper[4664]: I1013 08:21:15.948418 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerStarted","Data":"92dccd2e5d6c80f057eae57d39078c7e29e6f80078f1ddaf9e00e7f269cc2404"} Oct 13 08:21:16 crc kubenswrapper[4664]: I1013 08:21:16.963315 4664 generic.go:334] "Generic (PLEG): container finished" podID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerID="93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883" exitCode=0 Oct 13 08:21:16 crc kubenswrapper[4664]: I1013 08:21:16.963528 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerDied","Data":"93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883"} Oct 13 08:21:16 crc kubenswrapper[4664]: I1013 08:21:16.967294 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:21:19 crc kubenswrapper[4664]: I1013 08:21:19.001296 4664 generic.go:334] "Generic (PLEG): container finished" podID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerID="d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41" exitCode=0 Oct 13 08:21:19 crc kubenswrapper[4664]: I1013 08:21:19.001403 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerDied","Data":"d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41"} Oct 13 08:21:20 crc kubenswrapper[4664]: I1013 08:21:20.014724 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerStarted","Data":"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"} Oct 13 08:21:20 crc kubenswrapper[4664]: I1013 08:21:20.037688 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mf65w" podStartSLOduration=3.538992578 podStartE2EDuration="6.037649955s" podCreationTimestamp="2025-10-13 08:21:14 +0000 UTC" firstStartedPulling="2025-10-13 08:21:16.966876925 +0000 UTC m=+5684.654322137" lastFinishedPulling="2025-10-13 08:21:19.465534332 +0000 UTC m=+5687.152979514" observedRunningTime="2025-10-13 08:21:20.037193873 +0000 UTC m=+5687.724639075" watchObservedRunningTime="2025-10-13 08:21:20.037649955 +0000 UTC m=+5687.725095187" Oct 13 08:21:25 crc kubenswrapper[4664]: I1013 08:21:25.311533 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:25 crc kubenswrapper[4664]: I1013 08:21:25.311980 4664 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:25 crc kubenswrapper[4664]: I1013 08:21:25.366612 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:26 crc kubenswrapper[4664]: I1013 08:21:26.182107 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:26 crc kubenswrapper[4664]: I1013 08:21:26.253393 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"] Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.118572 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mf65w" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="registry-server" containerID="cri-o://e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e" gracePeriod=2 Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.584666 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mf65w" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.685025 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6nb7\" (UniqueName: \"kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7\") pod \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.685519 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content\") pod \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.685719 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities\") pod \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\" (UID: \"ae72bef5-66c7-4ab2-980d-1a721663f3fd\") " Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.686509 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities" (OuterVolumeSpecName: "utilities") pod "ae72bef5-66c7-4ab2-980d-1a721663f3fd" (UID: "ae72bef5-66c7-4ab2-980d-1a721663f3fd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.690955 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7" (OuterVolumeSpecName: "kube-api-access-z6nb7") pod "ae72bef5-66c7-4ab2-980d-1a721663f3fd" (UID: "ae72bef5-66c7-4ab2-980d-1a721663f3fd"). InnerVolumeSpecName "kube-api-access-z6nb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.698149 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae72bef5-66c7-4ab2-980d-1a721663f3fd" (UID: "ae72bef5-66c7-4ab2-980d-1a721663f3fd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.787533 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6nb7\" (UniqueName: \"kubernetes.io/projected/ae72bef5-66c7-4ab2-980d-1a721663f3fd-kube-api-access-z6nb7\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.787588 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:28 crc kubenswrapper[4664]: I1013 08:21:28.787609 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae72bef5-66c7-4ab2-980d-1a721663f3fd-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.136436 4664 generic.go:334] "Generic (PLEG): container finished" podID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerID="e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e" exitCode=0 Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.136508 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerDied","Data":"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"} Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.136565 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf65w" event={"ID":"ae72bef5-66c7-4ab2-980d-1a721663f3fd","Type":"ContainerDied","Data":"92dccd2e5d6c80f057eae57d39078c7e29e6f80078f1ddaf9e00e7f269cc2404"} Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.136600 4664 util.go:48] "No ready sandbox for pod can be found. 
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.136628 4664 scope.go:117] "RemoveContainer" containerID="e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.173047 4664 scope.go:117] "RemoveContainer" containerID="d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.178490 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"]
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.205893 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf65w"]
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.208467 4664 scope.go:117] "RemoveContainer" containerID="93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.240846 4664 scope.go:117] "RemoveContainer" containerID="e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"
Oct 13 08:21:29 crc kubenswrapper[4664]: E1013 08:21:29.241661 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e\": container with ID starting with e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e not found: ID does not exist" containerID="e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.241848 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e"} err="failed to get container status \"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e\": rpc error: code = NotFound desc = could not find container \"e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e\": container with ID starting with e50b544270fe57754ac467b53cebc0de9a04eb3260f69f1ead9ec2952364c18e not found: ID does not exist"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.241978 4664 scope.go:117] "RemoveContainer" containerID="d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41"
Oct 13 08:21:29 crc kubenswrapper[4664]: E1013 08:21:29.242521 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41\": container with ID starting with d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41 not found: ID does not exist" containerID="d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.242760 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41"} err="failed to get container status \"d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41\": rpc error: code = NotFound desc = could not find container \"d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41\": container with ID starting with d2b4ec5d4d9459dbd64448b2aa2ab3218d60f4f5e02ecc976a29c9ec368bca41 not found: ID does not exist"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.242877 4664 scope.go:117] "RemoveContainer" containerID="93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883"
Oct 13 08:21:29 crc kubenswrapper[4664]: E1013 08:21:29.243349 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883\": container with ID starting with 93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883 not found: ID does not exist" containerID="93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883"
Oct 13 08:21:29 crc kubenswrapper[4664]: I1013 08:21:29.243398 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883"} err="failed to get container status \"93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883\": rpc error: code = NotFound desc = could not find container \"93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883\": container with ID starting with 93daf74a5f9b2ae45812c0763da4bb126ac4b2701c62c49fd0e5e66870033883 not found: ID does not exist"
Oct 13 08:21:31 crc kubenswrapper[4664]: I1013 08:21:31.059965 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" path="/var/lib/kubelet/pods/ae72bef5-66c7-4ab2-980d-1a721663f3fd/volumes"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.804498 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wzh72"]
Oct 13 08:21:34 crc kubenswrapper[4664]: E1013 08:21:34.805569 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="extract-content"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.805585 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="extract-content"
Oct 13 08:21:34 crc kubenswrapper[4664]: E1013 08:21:34.805611 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="registry-server"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.805619 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="registry-server"
Oct 13 08:21:34 crc kubenswrapper[4664]: E1013 08:21:34.805647 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="extract-utilities"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.805656 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="extract-utilities"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.805873 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae72bef5-66c7-4ab2-980d-1a721663f3fd" containerName="registry-server"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.807486 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.839648 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wzh72"]
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.914050 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtgn5\" (UniqueName: \"kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.914153 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:34 crc kubenswrapper[4664]: I1013 08:21:34.914254 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.016405 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtgn5\" (UniqueName: \"kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.016480 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.016584 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.017089 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.017487 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72"
Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.072658 4664 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-qtgn5\" (UniqueName: \"kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5\") pod \"community-operators-wzh72\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.136028 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:35 crc kubenswrapper[4664]: I1013 08:21:35.749305 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wzh72"] Oct 13 08:21:36 crc kubenswrapper[4664]: I1013 08:21:36.212082 4664 generic.go:334] "Generic (PLEG): container finished" podID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerID="347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee" exitCode=0 Oct 13 08:21:36 crc kubenswrapper[4664]: I1013 08:21:36.212167 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerDied","Data":"347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee"} Oct 13 08:21:36 crc kubenswrapper[4664]: I1013 08:21:36.216538 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerStarted","Data":"c66d21a4325943c4388e36d3e347a4accc8527216d5b99ba6d5e8fcae573093f"} Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.215233 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.218988 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.228679 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.254579 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerStarted","Data":"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae"} Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.378539 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.378666 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzddj\" (UniqueName: \"kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.378710 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.480371 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.480509 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzddj\" (UniqueName: \"kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.480571 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.480773 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.481088 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.499739 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzddj\" (UniqueName: \"kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj\") pod \"redhat-operators-gbccw\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:37 crc kubenswrapper[4664]: I1013 08:21:37.557710 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:38 crc kubenswrapper[4664]: I1013 08:21:38.111905 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:21:38 crc kubenswrapper[4664]: I1013 08:21:38.264317 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerStarted","Data":"ec3e54e202d14f85d72d6151e9c18be3e5bc4a4c14e37975185e07323cfb9d2e"} Oct 13 08:21:39 crc kubenswrapper[4664]: I1013 08:21:39.281414 4664 generic.go:334] "Generic (PLEG): container finished" podID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerID="333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae" exitCode=0 Oct 13 08:21:39 crc kubenswrapper[4664]: I1013 08:21:39.281545 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerDied","Data":"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae"} Oct 13 08:21:39 crc kubenswrapper[4664]: I1013 08:21:39.286288 4664 generic.go:334] "Generic (PLEG): container finished" podID="93237d30-9e53-4fa2-94d6-b396f525b312" containerID="539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760" exitCode=0 Oct 13 08:21:39 crc kubenswrapper[4664]: I1013 08:21:39.286356 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerDied","Data":"539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760"} Oct 13 08:21:40 crc kubenswrapper[4664]: I1013 08:21:40.298858 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerStarted","Data":"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4"} Oct 13 08:21:40 crc kubenswrapper[4664]: I1013 08:21:40.301029 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerStarted","Data":"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85"} Oct 13 08:21:40 crc kubenswrapper[4664]: I1013 08:21:40.316197 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wzh72" podStartSLOduration=2.826846978 podStartE2EDuration="6.316173845s" podCreationTimestamp="2025-10-13 08:21:34 +0000 UTC" firstStartedPulling="2025-10-13 08:21:36.213445099 +0000 UTC m=+5703.900890291" lastFinishedPulling="2025-10-13 08:21:39.702771926 +0000 UTC m=+5707.390217158" 
observedRunningTime="2025-10-13 08:21:40.314515159 +0000 UTC m=+5708.001960361" watchObservedRunningTime="2025-10-13 08:21:40.316173845 +0000 UTC m=+5708.003619037" Oct 13 08:21:44 crc kubenswrapper[4664]: I1013 08:21:44.341137 4664 generic.go:334] "Generic (PLEG): container finished" podID="93237d30-9e53-4fa2-94d6-b396f525b312" containerID="6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85" exitCode=0 Oct 13 08:21:44 crc kubenswrapper[4664]: I1013 08:21:44.341168 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerDied","Data":"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85"} Oct 13 08:21:45 crc kubenswrapper[4664]: I1013 08:21:45.137158 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:45 crc kubenswrapper[4664]: I1013 08:21:45.137522 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:45 crc kubenswrapper[4664]: I1013 08:21:45.353390 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerStarted","Data":"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6"} Oct 13 08:21:45 crc kubenswrapper[4664]: I1013 08:21:45.394601 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gbccw" podStartSLOduration=2.96403346 podStartE2EDuration="8.394581336s" podCreationTimestamp="2025-10-13 08:21:37 +0000 UTC" firstStartedPulling="2025-10-13 08:21:39.311444244 +0000 UTC m=+5706.998889476" lastFinishedPulling="2025-10-13 08:21:44.74199213 +0000 UTC m=+5712.429437352" observedRunningTime="2025-10-13 08:21:45.390295881 +0000 UTC m=+5713.077741103" watchObservedRunningTime="2025-10-13 08:21:45.394581336 +0000 UTC m=+5713.082026538" Oct 13 08:21:46 crc kubenswrapper[4664]: I1013 08:21:46.191098 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wzh72" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="registry-server" probeResult="failure" output=< Oct 13 08:21:46 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:21:46 crc kubenswrapper[4664]: > Oct 13 08:21:47 crc kubenswrapper[4664]: I1013 08:21:47.558413 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:47 crc kubenswrapper[4664]: I1013 08:21:47.558921 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:21:48 crc kubenswrapper[4664]: I1013 08:21:48.606925 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gbccw" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" probeResult="failure" output=< Oct 13 08:21:48 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:21:48 crc kubenswrapper[4664]: > Oct 13 08:21:55 crc kubenswrapper[4664]: I1013 08:21:55.221126 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:55 crc kubenswrapper[4664]: I1013 08:21:55.294022 4664 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:55 crc kubenswrapper[4664]: I1013 08:21:55.467147 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wzh72"] Oct 13 08:21:56 crc kubenswrapper[4664]: I1013 08:21:56.481028 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wzh72" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="registry-server" containerID="cri-o://7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4" gracePeriod=2 Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.029602 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.168835 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtgn5\" (UniqueName: \"kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5\") pod \"c510260c-f8c3-4a14-bd7a-34828a0a7533\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.168966 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities\") pod \"c510260c-f8c3-4a14-bd7a-34828a0a7533\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.169153 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content\") pod \"c510260c-f8c3-4a14-bd7a-34828a0a7533\" (UID: \"c510260c-f8c3-4a14-bd7a-34828a0a7533\") " Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.169655 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities" (OuterVolumeSpecName: "utilities") pod "c510260c-f8c3-4a14-bd7a-34828a0a7533" (UID: "c510260c-f8c3-4a14-bd7a-34828a0a7533"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.177955 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5" (OuterVolumeSpecName: "kube-api-access-qtgn5") pod "c510260c-f8c3-4a14-bd7a-34828a0a7533" (UID: "c510260c-f8c3-4a14-bd7a-34828a0a7533"). InnerVolumeSpecName "kube-api-access-qtgn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.231664 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c510260c-f8c3-4a14-bd7a-34828a0a7533" (UID: "c510260c-f8c3-4a14-bd7a-34828a0a7533"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.272160 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtgn5\" (UniqueName: \"kubernetes.io/projected/c510260c-f8c3-4a14-bd7a-34828a0a7533-kube-api-access-qtgn5\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.272211 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.272222 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c510260c-f8c3-4a14-bd7a-34828a0a7533-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.491959 4664 generic.go:334] "Generic (PLEG): container finished" podID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerID="7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4" exitCode=0 Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.491981 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerDied","Data":"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4"} Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.492053 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wzh72" event={"ID":"c510260c-f8c3-4a14-bd7a-34828a0a7533","Type":"ContainerDied","Data":"c66d21a4325943c4388e36d3e347a4accc8527216d5b99ba6d5e8fcae573093f"} Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.492086 4664 scope.go:117] "RemoveContainer" containerID="7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.493095 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wzh72" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.527130 4664 scope.go:117] "RemoveContainer" containerID="333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.546296 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wzh72"] Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.553630 4664 scope.go:117] "RemoveContainer" containerID="347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.558221 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wzh72"] Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.629438 4664 scope.go:117] "RemoveContainer" containerID="7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4" Oct 13 08:21:57 crc kubenswrapper[4664]: E1013 08:21:57.630086 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4\": container with ID starting with 7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4 not found: ID does not exist" containerID="7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.630131 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4"} err="failed to get container status \"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4\": rpc error: code = NotFound desc = could not find container \"7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4\": container with ID starting with 7ef2a3800f871289123e8ce07588f9b6675a305600550b009169a0ab438a53c4 not found: ID does not exist" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.630168 4664 scope.go:117] "RemoveContainer" containerID="333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae" Oct 13 08:21:57 crc kubenswrapper[4664]: E1013 08:21:57.630685 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae\": container with ID starting with 333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae not found: ID does not exist" containerID="333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.630707 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae"} err="failed to get container status \"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae\": rpc error: code = NotFound desc = could not find container \"333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae\": container with ID starting with 333c30347a69e9ce16959c3e4314b5e11a1a981499d2f780996090bbf8f08fae not found: ID does not exist" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.630723 4664 scope.go:117] "RemoveContainer" containerID="347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee" Oct 13 08:21:57 crc kubenswrapper[4664]: E1013 08:21:57.630937 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee\": container with ID starting with 347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee not found: ID does not exist" containerID="347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee" Oct 13 08:21:57 crc kubenswrapper[4664]: I1013 08:21:57.630955 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee"} err="failed to get container status \"347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee\": rpc error: code = NotFound desc = could not find container \"347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee\": container with ID starting with 347cf32f46b63e8a7cb552de30742e7890849f88168cd87f657bc1b21ff303ee not found: ID does not exist" Oct 13 08:21:58 crc kubenswrapper[4664]: I1013 08:21:58.627638 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gbccw" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" probeResult="failure" output=< Oct 13 08:21:58 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:21:58 crc kubenswrapper[4664]: > Oct 13 08:21:58 crc kubenswrapper[4664]: I1013 08:21:58.812480 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:21:58 crc kubenswrapper[4664]: I1013 08:21:58.812536 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:21:59 crc kubenswrapper[4664]: I1013 08:21:59.058555 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" path="/var/lib/kubelet/pods/c510260c-f8c3-4a14-bd7a-34828a0a7533/volumes" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.726277 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:03 crc kubenswrapper[4664]: E1013 08:22:03.727507 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="extract-content" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.727527 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="extract-content" Oct 13 08:22:03 crc kubenswrapper[4664]: E1013 08:22:03.727544 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="registry-server" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.727552 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="registry-server" Oct 13 08:22:03 crc kubenswrapper[4664]: E1013 08:22:03.727588 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="extract-utilities" Oct 13 08:22:03 crc 
kubenswrapper[4664]: I1013 08:22:03.727597 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="extract-utilities" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.727882 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="c510260c-f8c3-4a14-bd7a-34828a0a7533" containerName="registry-server" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.729773 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.739814 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.803173 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btm6z\" (UniqueName: \"kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.803435 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.803582 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.905385 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.905606 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btm6z\" (UniqueName: \"kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.906046 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.906365 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " 
pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.906821 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:03 crc kubenswrapper[4664]: I1013 08:22:03.928821 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btm6z\" (UniqueName: \"kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z\") pod \"certified-operators-d5r22\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:04 crc kubenswrapper[4664]: I1013 08:22:04.071771 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:04 crc kubenswrapper[4664]: I1013 08:22:04.639194 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:05 crc kubenswrapper[4664]: I1013 08:22:05.569551 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerID="8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a" exitCode=0 Oct 13 08:22:05 crc kubenswrapper[4664]: I1013 08:22:05.569687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerDied","Data":"8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a"} Oct 13 08:22:05 crc kubenswrapper[4664]: I1013 08:22:05.569866 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerStarted","Data":"a9c6f31cda6aa29181c7459c345944fe9618bf575a27e40b30fe05857736009e"} Oct 13 08:22:06 crc kubenswrapper[4664]: I1013 08:22:06.580247 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerStarted","Data":"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f"} Oct 13 08:22:08 crc kubenswrapper[4664]: I1013 08:22:08.600353 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerID="e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f" exitCode=0 Oct 13 08:22:08 crc kubenswrapper[4664]: I1013 08:22:08.600411 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerDied","Data":"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f"} Oct 13 08:22:08 crc kubenswrapper[4664]: I1013 08:22:08.618061 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gbccw" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" probeResult="failure" output=< Oct 13 08:22:08 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:22:08 crc kubenswrapper[4664]: > Oct 13 08:22:09 crc kubenswrapper[4664]: I1013 08:22:09.620356 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerStarted","Data":"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d"} Oct 13 08:22:09 crc kubenswrapper[4664]: I1013 08:22:09.647240 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-d5r22" podStartSLOduration=3.096145886 podStartE2EDuration="6.64721786s" podCreationTimestamp="2025-10-13 08:22:03 +0000 UTC" firstStartedPulling="2025-10-13 08:22:05.571238277 +0000 UTC m=+5733.258683469" lastFinishedPulling="2025-10-13 08:22:09.122310211 +0000 UTC m=+5736.809755443" observedRunningTime="2025-10-13 08:22:09.642673768 +0000 UTC m=+5737.330118980" watchObservedRunningTime="2025-10-13 08:22:09.64721786 +0000 UTC m=+5737.334663062" Oct 13 08:22:14 crc kubenswrapper[4664]: I1013 08:22:14.073244 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:14 crc kubenswrapper[4664]: I1013 08:22:14.073945 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:15 crc kubenswrapper[4664]: I1013 08:22:15.150742 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-d5r22" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="registry-server" probeResult="failure" output=< Oct 13 08:22:15 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:22:15 crc kubenswrapper[4664]: > Oct 13 08:22:17 crc kubenswrapper[4664]: I1013 08:22:17.612335 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:22:17 crc kubenswrapper[4664]: I1013 08:22:17.673757 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:22:18 crc kubenswrapper[4664]: I1013 08:22:18.607562 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:22:18 crc kubenswrapper[4664]: I1013 08:22:18.719828 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gbccw" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" containerID="cri-o://ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6" gracePeriod=2 Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.407383 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.521378 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzddj\" (UniqueName: \"kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj\") pod \"93237d30-9e53-4fa2-94d6-b396f525b312\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.521523 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content\") pod \"93237d30-9e53-4fa2-94d6-b396f525b312\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.521570 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities\") pod \"93237d30-9e53-4fa2-94d6-b396f525b312\" (UID: \"93237d30-9e53-4fa2-94d6-b396f525b312\") " Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.522532 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities" (OuterVolumeSpecName: "utilities") pod "93237d30-9e53-4fa2-94d6-b396f525b312" (UID: "93237d30-9e53-4fa2-94d6-b396f525b312"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.532983 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj" (OuterVolumeSpecName: "kube-api-access-pzddj") pod "93237d30-9e53-4fa2-94d6-b396f525b312" (UID: "93237d30-9e53-4fa2-94d6-b396f525b312"). InnerVolumeSpecName "kube-api-access-pzddj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.605682 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93237d30-9e53-4fa2-94d6-b396f525b312" (UID: "93237d30-9e53-4fa2-94d6-b396f525b312"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.624478 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzddj\" (UniqueName: \"kubernetes.io/projected/93237d30-9e53-4fa2-94d6-b396f525b312-kube-api-access-pzddj\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.624509 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.624519 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93237d30-9e53-4fa2-94d6-b396f525b312-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.731868 4664 generic.go:334] "Generic (PLEG): container finished" podID="93237d30-9e53-4fa2-94d6-b396f525b312" containerID="ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6" exitCode=0 Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.731920 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerDied","Data":"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6"} Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.731960 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gbccw" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.731995 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gbccw" event={"ID":"93237d30-9e53-4fa2-94d6-b396f525b312","Type":"ContainerDied","Data":"ec3e54e202d14f85d72d6151e9c18be3e5bc4a4c14e37975185e07323cfb9d2e"} Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.732030 4664 scope.go:117] "RemoveContainer" containerID="ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.768887 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.773311 4664 scope.go:117] "RemoveContainer" containerID="6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.777194 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gbccw"] Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.795816 4664 scope.go:117] "RemoveContainer" containerID="539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.863062 4664 scope.go:117] "RemoveContainer" containerID="ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6" Oct 13 08:22:19 crc kubenswrapper[4664]: E1013 08:22:19.863382 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6\": container with ID starting with ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6 not found: ID does not exist" containerID="ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.863410 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6"} err="failed to get container status \"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6\": rpc error: code = NotFound desc = could not find container \"ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6\": container with ID starting with ad6101a13f078e53f80990040c382bae67fa2d9f7bdbf9907c61b254495e8fd6 not found: ID does not exist" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.863429 4664 scope.go:117] "RemoveContainer" containerID="6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85" Oct 13 08:22:19 crc kubenswrapper[4664]: E1013 08:22:19.863836 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85\": container with ID starting with 6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85 not found: ID does not exist" containerID="6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.863855 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85"} err="failed to get container status \"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85\": rpc error: code = NotFound desc = could not find container \"6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85\": container with ID starting with 6d96dbb7f5d6cc4631ce382583568ea3ea3c517b64f1583dea8d7aff5618aa85 not found: ID does not exist" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.863867 4664 scope.go:117] "RemoveContainer" containerID="539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760" Oct 13 08:22:19 crc kubenswrapper[4664]: E1013 08:22:19.864144 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760\": container with ID starting with 539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760 not found: ID does not exist" containerID="539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760" Oct 13 08:22:19 crc kubenswrapper[4664]: I1013 08:22:19.864195 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760"} err="failed to get container status \"539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760\": rpc error: code = NotFound desc = could not find container \"539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760\": container with ID starting with 539ff85f4c81e08a5e987a4f07f2a521456ff73e06e10a5581d5c483f2d19760 not found: ID does not exist" Oct 13 08:22:21 crc kubenswrapper[4664]: I1013 08:22:21.059969 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" path="/var/lib/kubelet/pods/93237d30-9e53-4fa2-94d6-b396f525b312/volumes" Oct 13 08:22:24 crc kubenswrapper[4664]: I1013 08:22:24.149707 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:24 crc kubenswrapper[4664]: I1013 08:22:24.207448 4664 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:24 crc kubenswrapper[4664]: I1013 08:22:24.394415 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:25 crc kubenswrapper[4664]: I1013 08:22:25.789400 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-d5r22" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="registry-server" containerID="cri-o://7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d" gracePeriod=2 Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.277085 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.359266 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content\") pod \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.359468 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities\") pod \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.359645 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btm6z\" (UniqueName: \"kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z\") pod \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\" (UID: \"1f121a39-67ae-4d7c-a14e-43af8bbbbf23\") " Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.360174 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities" (OuterVolumeSpecName: "utilities") pod "1f121a39-67ae-4d7c-a14e-43af8bbbbf23" (UID: "1f121a39-67ae-4d7c-a14e-43af8bbbbf23"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.366041 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z" (OuterVolumeSpecName: "kube-api-access-btm6z") pod "1f121a39-67ae-4d7c-a14e-43af8bbbbf23" (UID: "1f121a39-67ae-4d7c-a14e-43af8bbbbf23"). InnerVolumeSpecName "kube-api-access-btm6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.428523 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f121a39-67ae-4d7c-a14e-43af8bbbbf23" (UID: "1f121a39-67ae-4d7c-a14e-43af8bbbbf23"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.461959 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.461991 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btm6z\" (UniqueName: \"kubernetes.io/projected/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-kube-api-access-btm6z\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.462001 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f121a39-67ae-4d7c-a14e-43af8bbbbf23-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.800590 4664 generic.go:334] "Generic (PLEG): container finished" podID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerID="7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d" exitCode=0 Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.801128 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d5r22" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.802641 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerDied","Data":"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d"} Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.802913 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d5r22" event={"ID":"1f121a39-67ae-4d7c-a14e-43af8bbbbf23","Type":"ContainerDied","Data":"a9c6f31cda6aa29181c7459c345944fe9618bf575a27e40b30fe05857736009e"} Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.802939 4664 scope.go:117] "RemoveContainer" containerID="7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.827635 4664 scope.go:117] "RemoveContainer" containerID="e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.858867 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.873867 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-d5r22"] Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.903412 4664 scope.go:117] "RemoveContainer" containerID="8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.949574 4664 scope.go:117] "RemoveContainer" containerID="7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d" Oct 13 08:22:26 crc kubenswrapper[4664]: E1013 08:22:26.950535 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d\": container with ID starting with 7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d not found: ID does not exist" containerID="7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.950576 
4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d"} err="failed to get container status \"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d\": rpc error: code = NotFound desc = could not find container \"7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d\": container with ID starting with 7931e01f2e023ae438a0d4ac9d69fe20a4b14856390420b39de680b6b99b487d not found: ID does not exist" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.950621 4664 scope.go:117] "RemoveContainer" containerID="e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f" Oct 13 08:22:26 crc kubenswrapper[4664]: E1013 08:22:26.950963 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f\": container with ID starting with e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f not found: ID does not exist" containerID="e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.950992 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f"} err="failed to get container status \"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f\": rpc error: code = NotFound desc = could not find container \"e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f\": container with ID starting with e4cc63209af98a536fc857f06895ec58a03df4be28314139685058a32b114b1f not found: ID does not exist" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.951013 4664 scope.go:117] "RemoveContainer" containerID="8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a" Oct 13 08:22:26 crc kubenswrapper[4664]: E1013 08:22:26.951416 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a\": container with ID starting with 8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a not found: ID does not exist" containerID="8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a" Oct 13 08:22:26 crc kubenswrapper[4664]: I1013 08:22:26.951458 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a"} err="failed to get container status \"8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a\": rpc error: code = NotFound desc = could not find container \"8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a\": container with ID starting with 8a039041a962e3b66a902620ef44f192e821c4ea152d722988434fe55b8c0a6a not found: ID does not exist" Oct 13 08:22:27 crc kubenswrapper[4664]: I1013 08:22:27.059808 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" path="/var/lib/kubelet/pods/1f121a39-67ae-4d7c-a14e-43af8bbbbf23/volumes" Oct 13 08:22:28 crc kubenswrapper[4664]: I1013 08:22:28.812270 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:22:28 crc kubenswrapper[4664]: I1013 08:22:28.812704 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:22:58 crc kubenswrapper[4664]: I1013 08:22:58.812077 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:22:58 crc kubenswrapper[4664]: I1013 08:22:58.812724 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:22:58 crc kubenswrapper[4664]: I1013 08:22:58.812835 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:22:58 crc kubenswrapper[4664]: I1013 08:22:58.813750 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:22:58 crc kubenswrapper[4664]: I1013 08:22:58.813908 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53" gracePeriod=600 Oct 13 08:22:59 crc kubenswrapper[4664]: I1013 08:22:59.186333 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53" exitCode=0 Oct 13 08:22:59 crc kubenswrapper[4664]: I1013 08:22:59.186423 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53"} Oct 13 08:22:59 crc kubenswrapper[4664]: I1013 08:22:59.186724 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf"} Oct 13 08:22:59 crc kubenswrapper[4664]: I1013 08:22:59.186761 4664 scope.go:117] "RemoveContainer" containerID="6817837dece98affe7ebbbab0aa7e1c2733998e8b5162a120e0fc427138bd191" Oct 13 08:25:28 crc kubenswrapper[4664]: I1013 08:25:28.811934 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:25:28 crc kubenswrapper[4664]: I1013 08:25:28.812882 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:25:58 crc kubenswrapper[4664]: I1013 08:25:58.811474 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:25:58 crc kubenswrapper[4664]: I1013 08:25:58.812033 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:26:28 crc kubenswrapper[4664]: I1013 08:26:28.812425 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:26:28 crc kubenswrapper[4664]: I1013 08:26:28.813360 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:26:28 crc kubenswrapper[4664]: I1013 08:26:28.813436 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:26:28 crc kubenswrapper[4664]: I1013 08:26:28.814660 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:26:28 crc kubenswrapper[4664]: I1013 08:26:28.814774 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" gracePeriod=600 Oct 13 08:26:28 crc kubenswrapper[4664]: E1013 08:26:28.940989 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" 
podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:26:29 crc kubenswrapper[4664]: I1013 08:26:29.415591 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" exitCode=0 Oct 13 08:26:29 crc kubenswrapper[4664]: I1013 08:26:29.415654 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf"} Oct 13 08:26:29 crc kubenswrapper[4664]: I1013 08:26:29.415699 4664 scope.go:117] "RemoveContainer" containerID="94e4aabad2b1de3fc8ecafacb4a304a43e4d5596b9134856154624f047695f53" Oct 13 08:26:29 crc kubenswrapper[4664]: I1013 08:26:29.416589 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:26:29 crc kubenswrapper[4664]: E1013 08:26:29.416912 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:26:42 crc kubenswrapper[4664]: I1013 08:26:42.046966 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:26:42 crc kubenswrapper[4664]: E1013 08:26:42.047834 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:26:57 crc kubenswrapper[4664]: I1013 08:26:57.049424 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:26:57 crc kubenswrapper[4664]: E1013 08:26:57.050090 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:27:12 crc kubenswrapper[4664]: I1013 08:27:12.047984 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:27:12 crc kubenswrapper[4664]: E1013 08:27:12.049007 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:27:26 crc kubenswrapper[4664]: I1013 
08:27:26.047417 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:27:26 crc kubenswrapper[4664]: E1013 08:27:26.048182 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:27:37 crc kubenswrapper[4664]: I1013 08:27:37.048258 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:27:37 crc kubenswrapper[4664]: E1013 08:27:37.049338 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:27:50 crc kubenswrapper[4664]: I1013 08:27:50.048598 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:27:50 crc kubenswrapper[4664]: E1013 08:27:50.049262 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:28:03 crc kubenswrapper[4664]: I1013 08:28:03.055980 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:28:03 crc kubenswrapper[4664]: E1013 08:28:03.056825 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:28:15 crc kubenswrapper[4664]: I1013 08:28:15.047370 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:28:15 crc kubenswrapper[4664]: E1013 08:28:15.048863 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:28:29 crc kubenswrapper[4664]: I1013 08:28:29.047828 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:28:29 crc kubenswrapper[4664]: E1013 08:28:29.048642 
4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:28:40 crc kubenswrapper[4664]: I1013 08:28:40.047609 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:28:40 crc kubenswrapper[4664]: E1013 08:28:40.048245 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:28:54 crc kubenswrapper[4664]: I1013 08:28:54.047145 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:28:54 crc kubenswrapper[4664]: E1013 08:28:54.047971 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:29:07 crc kubenswrapper[4664]: I1013 08:29:07.047937 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:29:07 crc kubenswrapper[4664]: E1013 08:29:07.049034 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:29:21 crc kubenswrapper[4664]: I1013 08:29:21.047667 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:29:21 crc kubenswrapper[4664]: E1013 08:29:21.048695 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:29:32 crc kubenswrapper[4664]: I1013 08:29:32.046831 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:29:32 crc kubenswrapper[4664]: E1013 08:29:32.047629 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:29:43 crc kubenswrapper[4664]: I1013 08:29:43.060334 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:29:43 crc kubenswrapper[4664]: E1013 08:29:43.061505 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:29:58 crc kubenswrapper[4664]: I1013 08:29:58.047595 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:29:58 crc kubenswrapper[4664]: E1013 08:29:58.048255 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.165999 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5"] Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166849 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="extract-utilities" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.166870 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="extract-utilities" Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166893 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="extract-utilities" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.166901 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="extract-utilities" Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166921 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="extract-content" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.166929 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="extract-content" Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166949 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.166956 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166974 4664 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.166981 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: E1013 08:30:00.166999 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="extract-content" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.167006 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="extract-content" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.167253 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="93237d30-9e53-4fa2-94d6-b396f525b312" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.167275 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f121a39-67ae-4d7c-a14e-43af8bbbbf23" containerName="registry-server" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.169995 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.175436 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.176102 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.210228 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5"] Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.211957 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.212024 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.212161 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqblj\" (UniqueName: \"kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.313844 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqblj\" (UniqueName: \"kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj\") pod \"collect-profiles-29339070-9cwv5\" (UID: 
\"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.313999 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.314049 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.315031 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.332356 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.335952 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqblj\" (UniqueName: \"kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj\") pod \"collect-profiles-29339070-9cwv5\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:00 crc kubenswrapper[4664]: I1013 08:30:00.506703 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:01 crc kubenswrapper[4664]: I1013 08:30:01.062090 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5"] Oct 13 08:30:01 crc kubenswrapper[4664]: I1013 08:30:01.756690 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" event={"ID":"d6519e97-c4b7-46bf-9489-ad844d762979","Type":"ContainerStarted","Data":"f29e57bd6ed7c118ce3e25808a1d4d084ebf3f6d978f52cdf09c32bd6fcfa656"} Oct 13 08:30:01 crc kubenswrapper[4664]: I1013 08:30:01.756958 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" event={"ID":"d6519e97-c4b7-46bf-9489-ad844d762979","Type":"ContainerStarted","Data":"2c41262310f904b352f613baddfcb319e00a38adffe9379da8094dee1624dd2a"} Oct 13 08:30:01 crc kubenswrapper[4664]: I1013 08:30:01.781743 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" podStartSLOduration=1.781726127 podStartE2EDuration="1.781726127s" podCreationTimestamp="2025-10-13 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 08:30:01.777224716 +0000 UTC m=+6209.464669908" watchObservedRunningTime="2025-10-13 08:30:01.781726127 +0000 UTC m=+6209.469171319" Oct 13 08:30:02 crc kubenswrapper[4664]: I1013 08:30:02.770945 4664 generic.go:334] "Generic (PLEG): container finished" podID="d6519e97-c4b7-46bf-9489-ad844d762979" containerID="f29e57bd6ed7c118ce3e25808a1d4d084ebf3f6d978f52cdf09c32bd6fcfa656" exitCode=0 Oct 13 08:30:02 crc kubenswrapper[4664]: I1013 08:30:02.770997 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" event={"ID":"d6519e97-c4b7-46bf-9489-ad844d762979","Type":"ContainerDied","Data":"f29e57bd6ed7c118ce3e25808a1d4d084ebf3f6d978f52cdf09c32bd6fcfa656"} Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.181345 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.299469 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume\") pod \"d6519e97-c4b7-46bf-9489-ad844d762979\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.299593 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqblj\" (UniqueName: \"kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj\") pod \"d6519e97-c4b7-46bf-9489-ad844d762979\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.299671 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume\") pod \"d6519e97-c4b7-46bf-9489-ad844d762979\" (UID: \"d6519e97-c4b7-46bf-9489-ad844d762979\") " Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.300887 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume" (OuterVolumeSpecName: "config-volume") pod "d6519e97-c4b7-46bf-9489-ad844d762979" (UID: "d6519e97-c4b7-46bf-9489-ad844d762979"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.307778 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d6519e97-c4b7-46bf-9489-ad844d762979" (UID: "d6519e97-c4b7-46bf-9489-ad844d762979"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.308229 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj" (OuterVolumeSpecName: "kube-api-access-gqblj") pod "d6519e97-c4b7-46bf-9489-ad844d762979" (UID: "d6519e97-c4b7-46bf-9489-ad844d762979"). InnerVolumeSpecName "kube-api-access-gqblj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.403007 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqblj\" (UniqueName: \"kubernetes.io/projected/d6519e97-c4b7-46bf-9489-ad844d762979-kube-api-access-gqblj\") on node \"crc\" DevicePath \"\"" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.403358 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6519e97-c4b7-46bf-9489-ad844d762979-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.403508 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6519e97-c4b7-46bf-9489-ad844d762979-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.797571 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" event={"ID":"d6519e97-c4b7-46bf-9489-ad844d762979","Type":"ContainerDied","Data":"2c41262310f904b352f613baddfcb319e00a38adffe9379da8094dee1624dd2a"} Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.797602 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.797620 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c41262310f904b352f613baddfcb319e00a38adffe9379da8094dee1624dd2a" Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.885729 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"] Oct 13 08:30:04 crc kubenswrapper[4664]: I1013 08:30:04.895939 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339025-2q269"] Oct 13 08:30:05 crc kubenswrapper[4664]: I1013 08:30:05.061301 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90a5f03e-7c3b-4264-ae15-83ce4dd9d890" path="/var/lib/kubelet/pods/90a5f03e-7c3b-4264-ae15-83ce4dd9d890/volumes" Oct 13 08:30:11 crc kubenswrapper[4664]: I1013 08:30:11.047552 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:30:11 crc kubenswrapper[4664]: E1013 08:30:11.049162 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:30:23 crc kubenswrapper[4664]: I1013 08:30:23.065429 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:30:23 crc kubenswrapper[4664]: E1013 08:30:23.068069 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:30:34 crc kubenswrapper[4664]: I1013 08:30:34.047928 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:30:34 crc kubenswrapper[4664]: E1013 08:30:34.048996 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:30:43 crc kubenswrapper[4664]: I1013 08:30:43.175013 4664 scope.go:117] "RemoveContainer" containerID="a86c63121fd104cc0b20de960528c4dc7adba56889369380e4af08df22101756" Oct 13 08:30:48 crc kubenswrapper[4664]: I1013 08:30:48.046926 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:30:48 crc kubenswrapper[4664]: E1013 08:30:48.047620 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:30:59 crc kubenswrapper[4664]: I1013 08:30:59.047455 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:30:59 crc kubenswrapper[4664]: E1013 08:30:59.048513 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:31:13 crc kubenswrapper[4664]: I1013 08:31:13.052289 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:31:13 crc kubenswrapper[4664]: E1013 08:31:13.052916 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:31:27 crc kubenswrapper[4664]: I1013 08:31:27.047317 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:31:27 crc kubenswrapper[4664]: E1013 08:31:27.048102 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:31:41 crc kubenswrapper[4664]: I1013 08:31:41.046753 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:31:41 crc kubenswrapper[4664]: I1013 08:31:41.857234 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54"} Oct 13 08:31:45 crc kubenswrapper[4664]: I1013 08:31:45.996910 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:31:45 crc kubenswrapper[4664]: E1013 08:31:45.999457 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6519e97-c4b7-46bf-9489-ad844d762979" containerName="collect-profiles" Oct 13 08:31:45 crc kubenswrapper[4664]: I1013 08:31:45.999568 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6519e97-c4b7-46bf-9489-ad844d762979" containerName="collect-profiles" Oct 13 08:31:45 crc kubenswrapper[4664]: I1013 08:31:45.999956 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6519e97-c4b7-46bf-9489-ad844d762979" containerName="collect-profiles" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.001788 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.036460 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.148528 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.149139 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.149285 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vx4n\" (UniqueName: \"kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.251624 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.251675 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.251727 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vx4n\" (UniqueName: \"kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.252442 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.252727 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.273863 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vx4n\" (UniqueName: \"kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n\") pod \"community-operators-smb66\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.320773 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.848169 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:31:46 crc kubenswrapper[4664]: I1013 08:31:46.912637 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerStarted","Data":"2cfa2c03475c0c2a7ddac9172794d935a631cfc0002bc7473b168ddaa4cd2cb4"} Oct 13 08:31:47 crc kubenswrapper[4664]: I1013 08:31:47.921767 4664 generic.go:334] "Generic (PLEG): container finished" podID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerID="18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43" exitCode=0 Oct 13 08:31:47 crc kubenswrapper[4664]: I1013 08:31:47.921827 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerDied","Data":"18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43"} Oct 13 08:31:47 crc kubenswrapper[4664]: I1013 08:31:47.925293 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:31:48 crc kubenswrapper[4664]: I1013 08:31:48.939269 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerStarted","Data":"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece"} Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.195933 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.200174 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.213651 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.352175 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.352257 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt229\" (UniqueName: \"kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.352482 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.453997 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.454107 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt229\" (UniqueName: \"kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.454167 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.454566 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.454622 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.476917 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qt229\" (UniqueName: \"kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229\") pod \"redhat-marketplace-vpv4r\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.538147 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.795100 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.797379 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.803298 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.961769 4664 generic.go:334] "Generic (PLEG): container finished" podID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerID="75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece" exitCode=0 Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.961833 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerDied","Data":"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece"} Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.964834 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.966376 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kj55\" (UniqueName: \"kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:50 crc kubenswrapper[4664]: I1013 08:31:50.966405 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.045185 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.068671 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kj55\" (UniqueName: \"kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.068713 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.069472 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.069505 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.069916 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.088389 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kj55\" (UniqueName: \"kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55\") pod \"redhat-operators-8mvvl\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.129373 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.620570 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.973076 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerStarted","Data":"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189"} Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.974478 4664 generic.go:334] "Generic (PLEG): container finished" podID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerID="975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f" exitCode=0 Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.974516 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerDied","Data":"975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f"} Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.974532 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerStarted","Data":"59a45acf873a7bcfa4c15ac782818fc79a956e1852fc073fda6fbb22db1bec27"} Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.976713 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c0800bc-60c4-4638-a721-6de61f32a903" containerID="c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd" exitCode=0 Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.976738 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerDied","Data":"c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd"} Oct 13 08:31:51 crc kubenswrapper[4664]: I1013 08:31:51.976752 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerStarted","Data":"5d8e42fccd99faeb678f681c0c9bad4a144aef2d4d4e488b35fb50777005a1e2"} Oct 13 08:31:52 crc kubenswrapper[4664]: I1013 08:31:52.002143 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-smb66" podStartSLOduration=3.485203771 podStartE2EDuration="7.002120193s" podCreationTimestamp="2025-10-13 08:31:45 +0000 UTC" firstStartedPulling="2025-10-13 08:31:47.924933877 +0000 UTC m=+6315.612379079" lastFinishedPulling="2025-10-13 08:31:51.441850309 +0000 UTC m=+6319.129295501" observedRunningTime="2025-10-13 08:31:51.996398718 +0000 UTC m=+6319.683843910" watchObservedRunningTime="2025-10-13 08:31:52.002120193 +0000 UTC m=+6319.689565385" Oct 13 08:31:52 crc kubenswrapper[4664]: I1013 08:31:52.986627 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerStarted","Data":"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48"} Oct 13 08:31:52 crc kubenswrapper[4664]: I1013 08:31:52.989432 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" 
event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerStarted","Data":"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5"} Oct 13 08:31:54 crc kubenswrapper[4664]: I1013 08:31:54.001299 4664 generic.go:334] "Generic (PLEG): container finished" podID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerID="bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5" exitCode=0 Oct 13 08:31:54 crc kubenswrapper[4664]: I1013 08:31:54.002540 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerDied","Data":"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5"} Oct 13 08:31:55 crc kubenswrapper[4664]: I1013 08:31:55.014835 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerStarted","Data":"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7"} Oct 13 08:31:55 crc kubenswrapper[4664]: I1013 08:31:55.040469 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vpv4r" podStartSLOduration=2.333925349 podStartE2EDuration="5.040449046s" podCreationTimestamp="2025-10-13 08:31:50 +0000 UTC" firstStartedPulling="2025-10-13 08:31:51.975829343 +0000 UTC m=+6319.663274535" lastFinishedPulling="2025-10-13 08:31:54.68235302 +0000 UTC m=+6322.369798232" observedRunningTime="2025-10-13 08:31:55.037068075 +0000 UTC m=+6322.724513327" watchObservedRunningTime="2025-10-13 08:31:55.040449046 +0000 UTC m=+6322.727894228" Oct 13 08:31:56 crc kubenswrapper[4664]: I1013 08:31:56.321526 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:56 crc kubenswrapper[4664]: I1013 08:31:56.321945 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:31:57 crc kubenswrapper[4664]: I1013 08:31:57.033746 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c0800bc-60c4-4638-a721-6de61f32a903" containerID="ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48" exitCode=0 Oct 13 08:31:57 crc kubenswrapper[4664]: I1013 08:31:57.033801 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerDied","Data":"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48"} Oct 13 08:31:57 crc kubenswrapper[4664]: I1013 08:31:57.388740 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-smb66" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="registry-server" probeResult="failure" output=< Oct 13 08:31:57 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:31:57 crc kubenswrapper[4664]: > Oct 13 08:31:58 crc kubenswrapper[4664]: I1013 08:31:58.047937 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerStarted","Data":"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2"} Oct 13 08:31:58 crc kubenswrapper[4664]: I1013 08:31:58.075170 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-8mvvl" podStartSLOduration=2.611171002 podStartE2EDuration="8.075147171s" podCreationTimestamp="2025-10-13 08:31:50 +0000 UTC" firstStartedPulling="2025-10-13 08:31:51.97980734 +0000 UTC m=+6319.667252532" lastFinishedPulling="2025-10-13 08:31:57.443783509 +0000 UTC m=+6325.131228701" observedRunningTime="2025-10-13 08:31:58.067155516 +0000 UTC m=+6325.754600728" watchObservedRunningTime="2025-10-13 08:31:58.075147171 +0000 UTC m=+6325.762592363" Oct 13 08:32:00 crc kubenswrapper[4664]: I1013 08:32:00.538755 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:00 crc kubenswrapper[4664]: I1013 08:32:00.539104 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:00 crc kubenswrapper[4664]: I1013 08:32:00.609419 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:01 crc kubenswrapper[4664]: I1013 08:32:01.127104 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:01 crc kubenswrapper[4664]: I1013 08:32:01.130355 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:01 crc kubenswrapper[4664]: I1013 08:32:01.130436 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:02 crc kubenswrapper[4664]: I1013 08:32:02.176658 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:32:02 crc kubenswrapper[4664]: I1013 08:32:02.186636 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8mvvl" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" probeResult="failure" output=< Oct 13 08:32:02 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:32:02 crc kubenswrapper[4664]: > Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.094574 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vpv4r" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="registry-server" containerID="cri-o://85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7" gracePeriod=2 Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.728637 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.855901 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt229\" (UniqueName: \"kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229\") pod \"7fb8ac9e-9730-4917-9993-85e19ef980ee\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.856009 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content\") pod \"7fb8ac9e-9730-4917-9993-85e19ef980ee\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.856058 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities\") pod \"7fb8ac9e-9730-4917-9993-85e19ef980ee\" (UID: \"7fb8ac9e-9730-4917-9993-85e19ef980ee\") " Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.857045 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities" (OuterVolumeSpecName: "utilities") pod "7fb8ac9e-9730-4917-9993-85e19ef980ee" (UID: "7fb8ac9e-9730-4917-9993-85e19ef980ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.861243 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229" (OuterVolumeSpecName: "kube-api-access-qt229") pod "7fb8ac9e-9730-4917-9993-85e19ef980ee" (UID: "7fb8ac9e-9730-4917-9993-85e19ef980ee"). InnerVolumeSpecName "kube-api-access-qt229". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.870597 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fb8ac9e-9730-4917-9993-85e19ef980ee" (UID: "7fb8ac9e-9730-4917-9993-85e19ef980ee"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.958093 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt229\" (UniqueName: \"kubernetes.io/projected/7fb8ac9e-9730-4917-9993-85e19ef980ee-kube-api-access-qt229\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.958121 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:03 crc kubenswrapper[4664]: I1013 08:32:03.958131 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb8ac9e-9730-4917-9993-85e19ef980ee-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.107526 4664 generic.go:334] "Generic (PLEG): container finished" podID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerID="85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7" exitCode=0 Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.107595 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerDied","Data":"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7"} Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.107615 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpv4r" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.107655 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpv4r" event={"ID":"7fb8ac9e-9730-4917-9993-85e19ef980ee","Type":"ContainerDied","Data":"59a45acf873a7bcfa4c15ac782818fc79a956e1852fc073fda6fbb22db1bec27"} Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.107681 4664 scope.go:117] "RemoveContainer" containerID="85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.135647 4664 scope.go:117] "RemoveContainer" containerID="bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.168891 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.180348 4664 scope.go:117] "RemoveContainer" containerID="975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.182543 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpv4r"] Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.241157 4664 scope.go:117] "RemoveContainer" containerID="85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7" Oct 13 08:32:04 crc kubenswrapper[4664]: E1013 08:32:04.241575 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7\": container with ID starting with 85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7 not found: ID does not exist" containerID="85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.241630 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7"} err="failed to get container status \"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7\": rpc error: code = NotFound desc = could not find container \"85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7\": container with ID starting with 85f16130b0de87744fd0c340ebf2370145fad0a7db4ce4bb929733c0df8087c7 not found: ID does not exist" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.241668 4664 scope.go:117] "RemoveContainer" containerID="bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5" Oct 13 08:32:04 crc kubenswrapper[4664]: E1013 08:32:04.242083 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5\": container with ID starting with bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5 not found: ID does not exist" containerID="bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.242110 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5"} err="failed to get container status \"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5\": rpc error: code = NotFound desc = could not find container \"bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5\": container with ID starting with bdc6d8134f9c689e1cb0fa095df369e2c8dfbc71487a5ba4067a615f795157d5 not found: ID does not exist" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.242131 4664 scope.go:117] "RemoveContainer" containerID="975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f" Oct 13 08:32:04 crc kubenswrapper[4664]: E1013 08:32:04.242493 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f\": container with ID starting with 975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f not found: ID does not exist" containerID="975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f" Oct 13 08:32:04 crc kubenswrapper[4664]: I1013 08:32:04.242526 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f"} err="failed to get container status \"975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f\": rpc error: code = NotFound desc = could not find container \"975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f\": container with ID starting with 975fd4c85cc974ef938aec2ca98a63879e8b6fa282b46998b8935beecde9d42f not found: ID does not exist" Oct 13 08:32:05 crc kubenswrapper[4664]: I1013 08:32:05.058254 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" path="/var/lib/kubelet/pods/7fb8ac9e-9730-4917-9993-85e19ef980ee/volumes" Oct 13 08:32:06 crc kubenswrapper[4664]: I1013 08:32:06.419474 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:32:06 crc kubenswrapper[4664]: I1013 08:32:06.505905 4664 kubelet.go:2542] "SyncLoop (probe)" 
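The NotFound errors above are benign: the pod was force-removed from the API (SyncLoop REMOVE) while the kubelet's own RemoveContainer path was still walking the old container IDs, so by the time it asks CRI-O for status the containers are already gone. Cleanup code in that position has to treat NotFound as success. A generic sketch of the idea, under the assumption that the runtime surfaces gRPC status codes as CRI-O does here; the remove callback is schematic, not the kubelet's actual interface:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent treats NotFound from the runtime as success: another
// cleanup path (here, the API-driven pod REMOVE) may have deleted the
// container first, exactly the race visible in the entries above.
func removeIfPresent(remove func(id string) error, id string) error {
	err := remove(id)
	if err == nil || status.Code(err) == codes.NotFound {
		return nil // already gone; deletion is idempotent
	}
	return err
}

func main() {
	// Simulate the runtime answering NotFound for an already-removed container.
	notFound := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	fmt.Println(removeIfPresent(notFound, "85f16130b0de8774")) // <nil>
}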
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:32:06 crc kubenswrapper[4664]: I1013 08:32:06.977888 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.147148 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-smb66" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="registry-server" containerID="cri-o://0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189" gracePeriod=2 Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.703969 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.768105 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities\") pod \"4976b4bb-0dca-48bc-aabd-50d00c35696d\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.768302 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content\") pod \"4976b4bb-0dca-48bc-aabd-50d00c35696d\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.768781 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities" (OuterVolumeSpecName: "utilities") pod "4976b4bb-0dca-48bc-aabd-50d00c35696d" (UID: "4976b4bb-0dca-48bc-aabd-50d00c35696d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.771972 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vx4n\" (UniqueName: \"kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n\") pod \"4976b4bb-0dca-48bc-aabd-50d00c35696d\" (UID: \"4976b4bb-0dca-48bc-aabd-50d00c35696d\") " Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.772895 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.784804 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n" (OuterVolumeSpecName: "kube-api-access-2vx4n") pod "4976b4bb-0dca-48bc-aabd-50d00c35696d" (UID: "4976b4bb-0dca-48bc-aabd-50d00c35696d"). InnerVolumeSpecName "kube-api-access-2vx4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.828894 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4976b4bb-0dca-48bc-aabd-50d00c35696d" (UID: "4976b4bb-0dca-48bc-aabd-50d00c35696d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.875321 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4976b4bb-0dca-48bc-aabd-50d00c35696d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:08 crc kubenswrapper[4664]: I1013 08:32:08.875373 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vx4n\" (UniqueName: \"kubernetes.io/projected/4976b4bb-0dca-48bc-aabd-50d00c35696d-kube-api-access-2vx4n\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.160768 4664 generic.go:334] "Generic (PLEG): container finished" podID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerID="0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189" exitCode=0 Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.160861 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerDied","Data":"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189"} Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.160906 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smb66" event={"ID":"4976b4bb-0dca-48bc-aabd-50d00c35696d","Type":"ContainerDied","Data":"2cfa2c03475c0c2a7ddac9172794d935a631cfc0002bc7473b168ddaa4cd2cb4"} Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.160934 4664 scope.go:117] "RemoveContainer" containerID="0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.161137 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-smb66" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.193073 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.202849 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-smb66"] Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.204081 4664 scope.go:117] "RemoveContainer" containerID="75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.229269 4664 scope.go:117] "RemoveContainer" containerID="18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.279866 4664 scope.go:117] "RemoveContainer" containerID="0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189" Oct 13 08:32:09 crc kubenswrapper[4664]: E1013 08:32:09.280566 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189\": container with ID starting with 0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189 not found: ID does not exist" containerID="0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.280633 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189"} err="failed to get container status \"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189\": rpc error: code = NotFound desc = could not find container \"0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189\": container with ID starting with 0319034f66e51dbf9721e7ac19691693a32e08e3c423dcde11df9c157b7dc189 not found: ID does not exist" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.280667 4664 scope.go:117] "RemoveContainer" containerID="75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece" Oct 13 08:32:09 crc kubenswrapper[4664]: E1013 08:32:09.281226 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece\": container with ID starting with 75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece not found: ID does not exist" containerID="75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.281302 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece"} err="failed to get container status \"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece\": rpc error: code = NotFound desc = could not find container \"75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece\": container with ID starting with 75d730d65b527d63e5db185e7bad87c700c546cd9ccd51e05b299d8ec5a93ece not found: ID does not exist" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.281342 4664 scope.go:117] "RemoveContainer" containerID="18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43" Oct 13 08:32:09 crc kubenswrapper[4664]: E1013 08:32:09.281891 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43\": container with ID starting with 18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43 not found: ID does not exist" containerID="18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43" Oct 13 08:32:09 crc kubenswrapper[4664]: I1013 08:32:09.281932 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43"} err="failed to get container status \"18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43\": rpc error: code = NotFound desc = could not find container \"18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43\": container with ID starting with 18afe37ba847d7b9e2834c50a1d167bb442cf9e483b18d998db02eb64c39fd43 not found: ID does not exist" Oct 13 08:32:11 crc kubenswrapper[4664]: I1013 08:32:11.056863 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" path="/var/lib/kubelet/pods/4976b4bb-0dca-48bc-aabd-50d00c35696d/volumes" Oct 13 08:32:12 crc kubenswrapper[4664]: I1013 08:32:12.196091 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8mvvl" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" probeResult="failure" output=< Oct 13 08:32:12 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:32:12 crc kubenswrapper[4664]: > Oct 13 08:32:22 crc kubenswrapper[4664]: I1013 08:32:22.178616 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8mvvl" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" probeResult="failure" output=< Oct 13 08:32:22 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:32:22 crc kubenswrapper[4664]: > Oct 13 08:32:31 crc kubenswrapper[4664]: I1013 08:32:31.227250 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:31 crc kubenswrapper[4664]: I1013 08:32:31.310568 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:31 crc kubenswrapper[4664]: I1013 08:32:31.485188 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:32:32 crc kubenswrapper[4664]: I1013 08:32:32.453005 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8mvvl" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" containerID="cri-o://661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2" gracePeriod=2 Oct 13 08:32:32 crc kubenswrapper[4664]: I1013 08:32:32.979918 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.090886 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities\") pod \"6c0800bc-60c4-4638-a721-6de61f32a903\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.091199 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content\") pod \"6c0800bc-60c4-4638-a721-6de61f32a903\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.091453 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kj55\" (UniqueName: \"kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55\") pod \"6c0800bc-60c4-4638-a721-6de61f32a903\" (UID: \"6c0800bc-60c4-4638-a721-6de61f32a903\") " Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.091662 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities" (OuterVolumeSpecName: "utilities") pod "6c0800bc-60c4-4638-a721-6de61f32a903" (UID: "6c0800bc-60c4-4638-a721-6de61f32a903"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.092270 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.099064 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55" (OuterVolumeSpecName: "kube-api-access-8kj55") pod "6c0800bc-60c4-4638-a721-6de61f32a903" (UID: "6c0800bc-60c4-4638-a721-6de61f32a903"). InnerVolumeSpecName "kube-api-access-8kj55". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.176684 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c0800bc-60c4-4638-a721-6de61f32a903" (UID: "6c0800bc-60c4-4638-a721-6de61f32a903"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.193578 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kj55\" (UniqueName: \"kubernetes.io/projected/6c0800bc-60c4-4638-a721-6de61f32a903-kube-api-access-8kj55\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.193616 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c0800bc-60c4-4638-a721-6de61f32a903-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.465089 4664 generic.go:334] "Generic (PLEG): container finished" podID="6c0800bc-60c4-4638-a721-6de61f32a903" containerID="661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2" exitCode=0 Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.465153 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerDied","Data":"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2"} Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.465196 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8mvvl" event={"ID":"6c0800bc-60c4-4638-a721-6de61f32a903","Type":"ContainerDied","Data":"5d8e42fccd99faeb678f681c0c9bad4a144aef2d4d4e488b35fb50777005a1e2"} Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.465226 4664 scope.go:117] "RemoveContainer" containerID="661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.465420 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8mvvl" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.495681 4664 scope.go:117] "RemoveContainer" containerID="ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.521097 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.528632 4664 scope.go:117] "RemoveContainer" containerID="c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.535214 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8mvvl"] Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.566426 4664 scope.go:117] "RemoveContainer" containerID="661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2" Oct 13 08:32:33 crc kubenswrapper[4664]: E1013 08:32:33.566752 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2\": container with ID starting with 661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2 not found: ID does not exist" containerID="661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.566779 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2"} err="failed to get container status \"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2\": rpc error: code = NotFound desc = could not find container \"661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2\": container with ID starting with 661ddecd11f4931160786b7f001f1cab097849381cc8ec2f130831f6cefadbb2 not found: ID does not exist" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.566812 4664 scope.go:117] "RemoveContainer" containerID="ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48" Oct 13 08:32:33 crc kubenswrapper[4664]: E1013 08:32:33.567263 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48\": container with ID starting with ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48 not found: ID does not exist" containerID="ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.567279 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48"} err="failed to get container status \"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48\": rpc error: code = NotFound desc = could not find container \"ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48\": container with ID starting with ea80c7c540d324d52b8741bf1dfaa2d7e556e8ffa9ee96790a3548a12dd87f48 not found: ID does not exist" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.567291 4664 scope.go:117] "RemoveContainer" containerID="c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd" Oct 13 08:32:33 crc kubenswrapper[4664]: E1013 08:32:33.567483 4664 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd\": container with ID starting with c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd not found: ID does not exist" containerID="c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd" Oct 13 08:32:33 crc kubenswrapper[4664]: I1013 08:32:33.567498 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd"} err="failed to get container status \"c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd\": rpc error: code = NotFound desc = could not find container \"c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd\": container with ID starting with c0389a520cfeac50d4f9810a35e3804af3b451ccdaa5bb9a621c9b0c8fed9fdd not found: ID does not exist" Oct 13 08:32:35 crc kubenswrapper[4664]: I1013 08:32:35.068611 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" path="/var/lib/kubelet/pods/6c0800bc-60c4-4638-a721-6de61f32a903/volumes" Oct 13 08:33:58 crc kubenswrapper[4664]: I1013 08:33:58.812630 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:33:58 crc kubenswrapper[4664]: I1013 08:33:58.813324 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:34:28 crc kubenswrapper[4664]: I1013 08:34:28.811602 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:34:28 crc kubenswrapper[4664]: I1013 08:34:28.812346 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:34:58 crc kubenswrapper[4664]: I1013 08:34:58.812424 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:34:58 crc kubenswrapper[4664]: I1013 08:34:58.812874 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:34:58 crc kubenswrapper[4664]: I1013 08:34:58.812932 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:34:58 crc kubenswrapper[4664]: I1013 08:34:58.813596 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:34:58 crc kubenswrapper[4664]: I1013 08:34:58.813665 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54" gracePeriod=600 Oct 13 08:34:59 crc kubenswrapper[4664]: I1013 08:34:59.056309 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54" exitCode=0 Oct 13 08:34:59 crc kubenswrapper[4664]: I1013 08:34:59.066995 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54"} Oct 13 08:34:59 crc kubenswrapper[4664]: I1013 08:34:59.067056 4664 scope.go:117] "RemoveContainer" containerID="35be61eb6684bd63d2365b362f3cd2e212a56cf5e7c7a23bcc4e23df86e37ccf" Oct 13 08:35:00 crc kubenswrapper[4664]: I1013 08:35:00.089359 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"} Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.538657 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539456 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539468 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539481 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539488 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539505 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539512 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539524 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" 
containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539530 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539542 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539547 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539560 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539565 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539574 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539581 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539592 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539599 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="extract-utilities" Oct 13 08:36:57 crc kubenswrapper[4664]: E1013 08:36:57.539610 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539616 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="extract-content" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539777 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fb8ac9e-9730-4917-9993-85e19ef980ee" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539817 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c0800bc-60c4-4638-a721-6de61f32a903" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.539845 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="4976b4bb-0dca-48bc-aabd-50d00c35696d" containerName="registry-server" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.541186 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.551424 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.638334 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vbmh\" (UniqueName: \"kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.638703 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.638747 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.740632 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vbmh\" (UniqueName: \"kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.740717 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.740760 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.741343 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.741832 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.764036 4664 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6vbmh\" (UniqueName: \"kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh\") pod \"certified-operators-q9pwl\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:57 crc kubenswrapper[4664]: I1013 08:36:57.874612 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:36:58 crc kubenswrapper[4664]: I1013 08:36:58.464329 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:36:59 crc kubenswrapper[4664]: I1013 08:36:59.391350 4664 generic.go:334] "Generic (PLEG): container finished" podID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerID="27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea" exitCode=0 Oct 13 08:36:59 crc kubenswrapper[4664]: I1013 08:36:59.391447 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerDied","Data":"27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea"} Oct 13 08:36:59 crc kubenswrapper[4664]: I1013 08:36:59.391678 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerStarted","Data":"d9e1c6438fa81517ebbb9896e4e7ffb915f192c3002bd83607cac9a0c0746e99"} Oct 13 08:36:59 crc kubenswrapper[4664]: I1013 08:36:59.394844 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:37:00 crc kubenswrapper[4664]: I1013 08:37:00.406416 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerStarted","Data":"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da"} Oct 13 08:37:02 crc kubenswrapper[4664]: I1013 08:37:02.438054 4664 generic.go:334] "Generic (PLEG): container finished" podID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerID="1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da" exitCode=0 Oct 13 08:37:02 crc kubenswrapper[4664]: I1013 08:37:02.438177 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerDied","Data":"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da"} Oct 13 08:37:03 crc kubenswrapper[4664]: I1013 08:37:03.453530 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerStarted","Data":"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd"} Oct 13 08:37:03 crc kubenswrapper[4664]: I1013 08:37:03.484541 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q9pwl" podStartSLOduration=2.972440044 podStartE2EDuration="6.484514729s" podCreationTimestamp="2025-10-13 08:36:57 +0000 UTC" firstStartedPulling="2025-10-13 08:36:59.394558687 +0000 UTC m=+6627.082003879" lastFinishedPulling="2025-10-13 08:37:02.906633372 +0000 UTC m=+6630.594078564" observedRunningTime="2025-10-13 08:37:03.47712837 +0000 UTC m=+6631.164573612" watchObservedRunningTime="2025-10-13 
08:37:03.484514729 +0000 UTC m=+6631.171959961" Oct 13 08:37:07 crc kubenswrapper[4664]: I1013 08:37:07.876066 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:07 crc kubenswrapper[4664]: I1013 08:37:07.876615 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:08 crc kubenswrapper[4664]: I1013 08:37:08.958666 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-q9pwl" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="registry-server" probeResult="failure" output=< Oct 13 08:37:08 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:37:08 crc kubenswrapper[4664]: > Oct 13 08:37:17 crc kubenswrapper[4664]: I1013 08:37:17.941129 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:18 crc kubenswrapper[4664]: I1013 08:37:18.014393 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:18 crc kubenswrapper[4664]: I1013 08:37:18.195279 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:37:19 crc kubenswrapper[4664]: I1013 08:37:19.639195 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q9pwl" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="registry-server" containerID="cri-o://04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd" gracePeriod=2 Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.145719 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.257546 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities\") pod \"90cbb009-9e6a-40ab-8620-295117ce63a0\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.257665 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content\") pod \"90cbb009-9e6a-40ab-8620-295117ce63a0\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.257781 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vbmh\" (UniqueName: \"kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh\") pod \"90cbb009-9e6a-40ab-8620-295117ce63a0\" (UID: \"90cbb009-9e6a-40ab-8620-295117ce63a0\") " Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.259654 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities" (OuterVolumeSpecName: "utilities") pod "90cbb009-9e6a-40ab-8620-295117ce63a0" (UID: "90cbb009-9e6a-40ab-8620-295117ce63a0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.265568 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh" (OuterVolumeSpecName: "kube-api-access-6vbmh") pod "90cbb009-9e6a-40ab-8620-295117ce63a0" (UID: "90cbb009-9e6a-40ab-8620-295117ce63a0"). InnerVolumeSpecName "kube-api-access-6vbmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.365830 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vbmh\" (UniqueName: \"kubernetes.io/projected/90cbb009-9e6a-40ab-8620-295117ce63a0-kube-api-access-6vbmh\") on node \"crc\" DevicePath \"\"" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.365857 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.376459 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90cbb009-9e6a-40ab-8620-295117ce63a0" (UID: "90cbb009-9e6a-40ab-8620-295117ce63a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.468301 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90cbb009-9e6a-40ab-8620-295117ce63a0-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.654598 4664 generic.go:334] "Generic (PLEG): container finished" podID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerID="04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd" exitCode=0 Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.654655 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q9pwl" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.654651 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerDied","Data":"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd"} Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.654705 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9pwl" event={"ID":"90cbb009-9e6a-40ab-8620-295117ce63a0","Type":"ContainerDied","Data":"d9e1c6438fa81517ebbb9896e4e7ffb915f192c3002bd83607cac9a0c0746e99"} Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.654726 4664 scope.go:117] "RemoveContainer" containerID="04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.697648 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.707169 4664 scope.go:117] "RemoveContainer" containerID="1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.708589 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q9pwl"] Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.733901 4664 scope.go:117] "RemoveContainer" containerID="27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.767980 4664 scope.go:117] "RemoveContainer" containerID="04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd" Oct 13 08:37:20 crc kubenswrapper[4664]: E1013 08:37:20.768368 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd\": container with ID starting with 04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd not found: ID does not exist" containerID="04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.768428 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd"} err="failed to get container status \"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd\": rpc error: code = NotFound desc = could not find container \"04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd\": container with ID starting with 04f04e87a33253f916e8abfb45dc94c6e875a1ef7ecfe1cff2db5cb0f6d9a5bd not found: ID does not exist" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.768449 4664 scope.go:117] "RemoveContainer" containerID="1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da" Oct 13 08:37:20 crc kubenswrapper[4664]: E1013 08:37:20.768756 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da\": container with ID starting with 1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da not found: ID does not exist" containerID="1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.768848 4664 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da"} err="failed to get container status \"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da\": rpc error: code = NotFound desc = could not find container \"1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da\": container with ID starting with 1f394838a1e3d813ab675bc967dd1fa73c04707bbb220c3a1178ef5fd6fb18da not found: ID does not exist" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.768884 4664 scope.go:117] "RemoveContainer" containerID="27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea" Oct 13 08:37:20 crc kubenswrapper[4664]: E1013 08:37:20.769335 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea\": container with ID starting with 27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea not found: ID does not exist" containerID="27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea" Oct 13 08:37:20 crc kubenswrapper[4664]: I1013 08:37:20.769354 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea"} err="failed to get container status \"27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea\": rpc error: code = NotFound desc = could not find container \"27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea\": container with ID starting with 27f2d0c7c0ba4f48a3768b0b8d7a3e44a2db11ae0a079b76739ea06faef887ea not found: ID does not exist" Oct 13 08:37:21 crc kubenswrapper[4664]: I1013 08:37:21.058077 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" path="/var/lib/kubelet/pods/90cbb009-9e6a-40ab-8620-295117ce63a0/volumes" Oct 13 08:37:28 crc kubenswrapper[4664]: I1013 08:37:28.812376 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:37:28 crc kubenswrapper[4664]: I1013 08:37:28.813005 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:37:58 crc kubenswrapper[4664]: I1013 08:37:58.812391 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:37:58 crc kubenswrapper[4664]: I1013 08:37:58.812910 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:38:28 crc kubenswrapper[4664]: I1013 
08:38:28.811923 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:38:28 crc kubenswrapper[4664]: I1013 08:38:28.812501 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:38:28 crc kubenswrapper[4664]: I1013 08:38:28.812560 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:38:28 crc kubenswrapper[4664]: I1013 08:38:28.813391 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:38:28 crc kubenswrapper[4664]: I1013 08:38:28.813463 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" gracePeriod=600 Oct 13 08:38:28 crc kubenswrapper[4664]: E1013 08:38:28.940206 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:38:29 crc kubenswrapper[4664]: I1013 08:38:29.362939 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" exitCode=0 Oct 13 08:38:29 crc kubenswrapper[4664]: I1013 08:38:29.362995 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"} Oct 13 08:38:29 crc kubenswrapper[4664]: I1013 08:38:29.363046 4664 scope.go:117] "RemoveContainer" containerID="5ab89ae3db5ae0813993b5f7f6735956eec58f1317f16bfb84369ef9eb071a54" Oct 13 08:38:29 crc kubenswrapper[4664]: I1013 08:38:29.363915 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:38:29 crc kubenswrapper[4664]: E1013 08:38:29.364372 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:38:40 crc kubenswrapper[4664]: I1013 08:38:40.047351 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:38:40 crc kubenswrapper[4664]: E1013 08:38:40.048519 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:38:52 crc kubenswrapper[4664]: I1013 08:38:52.047538 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:38:52 crc kubenswrapper[4664]: E1013 08:38:52.048183 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:39:05 crc kubenswrapper[4664]: I1013 08:39:05.047333 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:39:05 crc kubenswrapper[4664]: E1013 08:39:05.048075 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:39:17 crc kubenswrapper[4664]: I1013 08:39:17.047423 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:39:17 crc kubenswrapper[4664]: E1013 08:39:17.050059 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:39:28 crc kubenswrapper[4664]: I1013 08:39:28.047444 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:39:28 crc kubenswrapper[4664]: E1013 08:39:28.048328 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:39:40 crc kubenswrapper[4664]: I1013 08:39:40.047852 4664 
scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:39:40 crc kubenswrapper[4664]: E1013 08:39:40.049037 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:39:51 crc kubenswrapper[4664]: I1013 08:39:51.047833 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:39:51 crc kubenswrapper[4664]: E1013 08:39:51.048961 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:40:06 crc kubenswrapper[4664]: I1013 08:40:06.047590 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:40:06 crc kubenswrapper[4664]: E1013 08:40:06.049210 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:40:18 crc kubenswrapper[4664]: I1013 08:40:18.047731 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:40:18 crc kubenswrapper[4664]: E1013 08:40:18.048428 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:40:29 crc kubenswrapper[4664]: I1013 08:40:29.048632 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:40:29 crc kubenswrapper[4664]: E1013 08:40:29.049714 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:40:43 crc kubenswrapper[4664]: I1013 08:40:43.054510 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:40:43 crc kubenswrapper[4664]: E1013 08:40:43.055224 4664 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:40:57 crc kubenswrapper[4664]: I1013 08:40:57.046468 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:40:57 crc kubenswrapper[4664]: E1013 08:40:57.047228 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.465352 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-86c94b664f-jbp5z"] Oct 13 08:41:02 crc kubenswrapper[4664]: E1013 08:41:02.466446 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="registry-server" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.466544 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="registry-server" Oct 13 08:41:02 crc kubenswrapper[4664]: E1013 08:41:02.466560 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="extract-content" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.466568 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="extract-content" Oct 13 08:41:02 crc kubenswrapper[4664]: E1013 08:41:02.466599 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="extract-utilities" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.466610 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="extract-utilities" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.466876 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="90cbb009-9e6a-40ab-8620-295117ce63a0" containerName="registry-server" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.468266 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.544560 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86c94b664f-jbp5z"] Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582256 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-public-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582371 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-internal-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582397 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-httpd-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582425 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582461 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlbpp\" (UniqueName: \"kubernetes.io/projected/086d544e-1901-4bfc-b170-54ac640f25ee-kube-api-access-zlbpp\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582588 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-ovndb-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.582682 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-combined-ca-bundle\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.684895 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlbpp\" (UniqueName: \"kubernetes.io/projected/086d544e-1901-4bfc-b170-54ac640f25ee-kube-api-access-zlbpp\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.684977 4664 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-ovndb-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.685023 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-combined-ca-bundle\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.685070 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-public-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.685118 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-internal-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.685136 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-httpd-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.685162 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.706757 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-public-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.707447 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.708489 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-combined-ca-bundle\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.710422 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-httpd-config\") pod \"neutron-86c94b664f-jbp5z\" (UID: 
\"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.714335 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-internal-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.717493 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/086d544e-1901-4bfc-b170-54ac640f25ee-ovndb-tls-certs\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.744439 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlbpp\" (UniqueName: \"kubernetes.io/projected/086d544e-1901-4bfc-b170-54ac640f25ee-kube-api-access-zlbpp\") pod \"neutron-86c94b664f-jbp5z\" (UID: \"086d544e-1901-4bfc-b170-54ac640f25ee\") " pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:02 crc kubenswrapper[4664]: I1013 08:41:02.784875 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:04 crc kubenswrapper[4664]: I1013 08:41:04.132154 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86c94b664f-jbp5z"] Oct 13 08:41:05 crc kubenswrapper[4664]: I1013 08:41:05.059981 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86c94b664f-jbp5z" event={"ID":"086d544e-1901-4bfc-b170-54ac640f25ee","Type":"ContainerStarted","Data":"59c64d47a76ccd249d12f5429cbd1e3ce21f7b366ddeba1db7b8311835c33493"} Oct 13 08:41:05 crc kubenswrapper[4664]: I1013 08:41:05.060822 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:05 crc kubenswrapper[4664]: I1013 08:41:05.060968 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86c94b664f-jbp5z" event={"ID":"086d544e-1901-4bfc-b170-54ac640f25ee","Type":"ContainerStarted","Data":"74de19806a89628fc8305fc738aa1fc6eb9284276a3b308a158445eb8cbeb705"} Oct 13 08:41:05 crc kubenswrapper[4664]: I1013 08:41:05.061100 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86c94b664f-jbp5z" event={"ID":"086d544e-1901-4bfc-b170-54ac640f25ee","Type":"ContainerStarted","Data":"0bd10880756cc111dc781f71e0837a5b2ced59a2988ef65f4cdb715b4b06b93f"} Oct 13 08:41:05 crc kubenswrapper[4664]: I1013 08:41:05.088093 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-86c94b664f-jbp5z" podStartSLOduration=3.088062784 podStartE2EDuration="3.088062784s" podCreationTimestamp="2025-10-13 08:41:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 08:41:05.087293353 +0000 UTC m=+6872.774738545" watchObservedRunningTime="2025-10-13 08:41:05.088062784 +0000 UTC m=+6872.775507986" Oct 13 08:41:11 crc kubenswrapper[4664]: I1013 08:41:11.048383 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:41:11 crc kubenswrapper[4664]: E1013 08:41:11.049380 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:41:22 crc kubenswrapper[4664]: I1013 08:41:22.047014 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:41:22 crc kubenswrapper[4664]: E1013 08:41:22.049676 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:41:32 crc kubenswrapper[4664]: I1013 08:41:32.808528 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-86c94b664f-jbp5z" Oct 13 08:41:32 crc kubenswrapper[4664]: I1013 08:41:32.912092 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 08:41:32 crc kubenswrapper[4664]: I1013 08:41:32.912360 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-69d56d54f7-4ft86" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-api" containerID="cri-o://18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2" gracePeriod=30 Oct 13 08:41:32 crc kubenswrapper[4664]: I1013 08:41:32.912507 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-69d56d54f7-4ft86" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-httpd" containerID="cri-o://d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353" gracePeriod=30 Oct 13 08:41:33 crc kubenswrapper[4664]: I1013 08:41:33.361432 4664 generic.go:334] "Generic (PLEG): container finished" podID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerID="d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353" exitCode=0 Oct 13 08:41:33 crc kubenswrapper[4664]: I1013 08:41:33.361527 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerDied","Data":"d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353"} Oct 13 08:41:35 crc kubenswrapper[4664]: E1013 08:41:35.757415 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:41:37 crc kubenswrapper[4664]: I1013 08:41:37.048104 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:41:37 crc kubenswrapper[4664]: E1013 08:41:37.049313 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:41:42 crc kubenswrapper[4664]: I1013 08:41:42.480341 4664 generic.go:334] "Generic (PLEG): container finished" podID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerID="18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2" exitCode=0 Oct 13 08:41:42 crc kubenswrapper[4664]: I1013 08:41:42.480436 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerDied","Data":"18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2"} Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.234552 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350411 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350501 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8mkg\" (UniqueName: \"kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350626 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350715 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350768 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350867 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.350900 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle\") pod \"933ab1aa-594f-4932-8c9c-d42473d443fb\" (UID: \"933ab1aa-594f-4932-8c9c-d42473d443fb\") " Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.364222 4664 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg" (OuterVolumeSpecName: "kube-api-access-p8mkg") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "kube-api-access-p8mkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.364273 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.426138 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config" (OuterVolumeSpecName: "config") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.427150 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.436647 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.447303 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.449517 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "933ab1aa-594f-4932-8c9c-d42473d443fb" (UID: "933ab1aa-594f-4932-8c9c-d42473d443fb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454296 4664 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454334 4664 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-config\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454349 4664 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454363 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454375 4664 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454390 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8mkg\" (UniqueName: \"kubernetes.io/projected/933ab1aa-594f-4932-8c9c-d42473d443fb-kube-api-access-p8mkg\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.454402 4664 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/933ab1aa-594f-4932-8c9c-d42473d443fb-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.493309 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69d56d54f7-4ft86" event={"ID":"933ab1aa-594f-4932-8c9c-d42473d443fb","Type":"ContainerDied","Data":"fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa"} Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.493361 4664 scope.go:117] "RemoveContainer" containerID="d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.493443 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-69d56d54f7-4ft86" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.558983 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.562276 4664 scope.go:117] "RemoveContainer" containerID="18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.565688 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-69d56d54f7-4ft86"] Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.565720 4664 scope.go:117] "RemoveContainer" containerID="18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2" Oct 13 08:41:43 crc kubenswrapper[4664]: E1013 08:41:43.603390 4664 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_neutron-api_neutron-69d56d54f7-4ft86_openstack_933ab1aa-594f-4932-8c9c-d42473d443fb_0 in pod sandbox fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa: identifier is not a container" containerID="18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2" Oct 13 08:41:43 crc kubenswrapper[4664]: I1013 08:41:43.603450 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18db0d78c726908e89c9040633495e8f255327b01915587289e4a567fdead1a2"} err="rpc error: code = Unknown desc = failed to delete container k8s_neutron-api_neutron-69d56d54f7-4ft86_openstack_933ab1aa-594f-4932-8c9c-d42473d443fb_0 in pod sandbox fcd1b850cc69c5a5c328bc05f747bcdb422662c87a85a9697f868b9dbc8ffaaa: identifier is not a container" Oct 13 08:41:45 crc kubenswrapper[4664]: I1013 08:41:45.059551 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" path="/var/lib/kubelet/pods/933ab1aa-594f-4932-8c9c-d42473d443fb/volumes" Oct 13 08:41:46 crc kubenswrapper[4664]: E1013 08:41:46.020284 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:41:52 crc kubenswrapper[4664]: I1013 08:41:52.047298 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:41:52 crc kubenswrapper[4664]: E1013 08:41:52.048195 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:41:56 crc kubenswrapper[4664]: E1013 08:41:56.280653 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:42:06 crc kubenswrapper[4664]: E1013 08:42:06.568415 4664 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:42:07 crc kubenswrapper[4664]: I1013 08:42:07.047513 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:42:07 crc kubenswrapper[4664]: E1013 08:42:07.047758 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:42:16 crc kubenswrapper[4664]: E1013 08:42:16.866322 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:42:21 crc kubenswrapper[4664]: I1013 08:42:21.048113 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:42:21 crc kubenswrapper[4664]: E1013 08:42:21.049046 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:42:27 crc kubenswrapper[4664]: E1013 08:42:27.104942 4664 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933ab1aa_594f_4932_8c9c_d42473d443fb.slice/crio-conmon-d616fb174966d4ae2edf38282beab33c02d83f1429d06ce272bf0d8ad59e4353.scope\": RecentStats: unable to find data in memory cache]" Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.966281 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"] Oct 13 08:42:32 crc kubenswrapper[4664]: E1013 08:42:32.967294 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-api" Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.967309 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-api" Oct 13 08:42:32 crc kubenswrapper[4664]: E1013 08:42:32.967330 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-httpd" Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.967340 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-httpd" Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.967535 
Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.967535 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-httpd"
Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.967548 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="933ab1aa-594f-4932-8c9c-d42473d443fb" containerName="neutron-api"
Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.969039 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:32 crc kubenswrapper[4664]: I1013 08:42:32.981441 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"]
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.027702 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.027756 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.027865 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrctg\" (UniqueName: \"kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.129673 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrctg\" (UniqueName: \"kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.129840 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.129867 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.130384 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.130459 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.155209 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrctg\" (UniqueName: \"kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg\") pod \"redhat-operators-95cqr\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") " pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.311316 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:42:33 crc kubenswrapper[4664]: I1013 08:42:33.829885 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"]
Oct 13 08:42:34 crc kubenswrapper[4664]: I1013 08:42:34.028717 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerStarted","Data":"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"}
Oct 13 08:42:34 crc kubenswrapper[4664]: I1013 08:42:34.029140 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerStarted","Data":"ede05a0437e553629c1ab4a0dcf77d1535c18e2b307601da999aca379fa2bdfc"}
Oct 13 08:42:35 crc kubenswrapper[4664]: I1013 08:42:35.046614 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"
Oct 13 08:42:35 crc kubenswrapper[4664]: I1013 08:42:35.046989 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8918892-d221-4cd7-846a-2d49ea93b403" containerID="a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5" exitCode=0
Oct 13 08:42:35 crc kubenswrapper[4664]: E1013 08:42:35.047373 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 08:42:35 crc kubenswrapper[4664]: I1013 08:42:35.048751 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 13 08:42:35 crc kubenswrapper[4664]: I1013 08:42:35.060529 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerDied","Data":"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"}
Oct 13 08:42:37 crc kubenswrapper[4664]: I1013 08:42:37.063487 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerStarted","Data":"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"}
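
The "Generic (PLEG)" and "SyncLoop (PLEG)" entries come from the kubelet's Pod Lifecycle Event Generator: it periodically relists containers from the runtime and turns the difference between consecutive snapshots into ContainerStarted/ContainerDied events for the sync loop, which is exactly the sequence visible above for the catalog pod's extract-utilities and extract-content steps. A simplified sketch of that diffing step (types and names are ours; the real relist logic also tracks sandboxes and a cache):

    package main

    import "fmt"

    type state string

    const (
        running state = "running"
        exited  state = "exited"
    )

    type event struct{ Pod, Container, Type string }

    // diff emits PLEG-style events: ContainerStarted when a container
    // newly appears or starts running, ContainerDied when it exits.
    func diff(pod string, prev, cur map[string]state) []event {
        var out []event
        for id, st := range cur {
            old, seen := prev[id]
            switch {
            case st == running && (!seen || old != running):
                out = append(out, event{pod, id, "ContainerStarted"})
            case st == exited && old != exited:
                out = append(out, event{pod, id, "ContainerDied"})
            }
        }
        return out
    }

    func main() {
        // Mirrors the transition above: extract-utilities exits while
        // extract-content starts (IDs shortened for readability).
        prev := map[string]state{"a83f59a7": running}
        cur := map[string]state{"a83f59a7": exited, "0497293f": running}
        for _, e := range diff("redhat-operators-95cqr", prev, cur) {
            fmt.Println(e.Type, e.Pod, e.Container)
        }
    }
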
finished" podID="a8918892-d221-4cd7-846a-2d49ea93b403" containerID="0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860" exitCode=0 Oct 13 08:42:40 crc kubenswrapper[4664]: I1013 08:42:40.108067 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerDied","Data":"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"} Oct 13 08:42:41 crc kubenswrapper[4664]: I1013 08:42:41.121329 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerStarted","Data":"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"} Oct 13 08:42:41 crc kubenswrapper[4664]: I1013 08:42:41.151736 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-95cqr" podStartSLOduration=3.4758778120000002 podStartE2EDuration="9.151717348s" podCreationTimestamp="2025-10-13 08:42:32 +0000 UTC" firstStartedPulling="2025-10-13 08:42:35.048482199 +0000 UTC m=+6962.735927411" lastFinishedPulling="2025-10-13 08:42:40.724321765 +0000 UTC m=+6968.411766947" observedRunningTime="2025-10-13 08:42:41.141590336 +0000 UTC m=+6968.829035538" watchObservedRunningTime="2025-10-13 08:42:41.151717348 +0000 UTC m=+6968.839162550" Oct 13 08:42:43 crc kubenswrapper[4664]: I1013 08:42:43.312517 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-95cqr" Oct 13 08:42:43 crc kubenswrapper[4664]: I1013 08:42:43.312855 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-95cqr" Oct 13 08:42:44 crc kubenswrapper[4664]: I1013 08:42:44.355354 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-95cqr" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server" probeResult="failure" output=< Oct 13 08:42:44 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:42:44 crc kubenswrapper[4664]: > Oct 13 08:42:46 crc kubenswrapper[4664]: I1013 08:42:46.047284 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:42:46 crc kubenswrapper[4664]: E1013 08:42:46.047845 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:42:54 crc kubenswrapper[4664]: I1013 08:42:54.358355 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-95cqr" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server" probeResult="failure" output=< Oct 13 08:42:54 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:42:54 crc kubenswrapper[4664]: > Oct 13 08:43:01 crc kubenswrapper[4664]: I1013 08:43:01.047459 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:43:01 crc kubenswrapper[4664]: E1013 08:43:01.048259 4664 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:43:04 crc kubenswrapper[4664]: I1013 08:43:04.358981 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-95cqr" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server" probeResult="failure" output=< Oct 13 08:43:04 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:43:04 crc kubenswrapper[4664]: > Oct 13 08:43:13 crc kubenswrapper[4664]: I1013 08:43:13.060659 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:43:13 crc kubenswrapper[4664]: E1013 08:43:13.061904 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:43:13 crc kubenswrapper[4664]: I1013 08:43:13.403141 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-95cqr" Oct 13 08:43:13 crc kubenswrapper[4664]: I1013 08:43:13.476933 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-95cqr" Oct 13 08:43:13 crc kubenswrapper[4664]: I1013 08:43:13.643686 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"] Oct 13 08:43:14 crc kubenswrapper[4664]: I1013 08:43:14.469534 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-95cqr" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server" containerID="cri-o://80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6" gracePeriod=2 Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.284606 4664 util.go:48] "No ready sandbox for pod can be found. 
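
The startup-probe failures above repeat on a ten-second cadence until 08:43:13: the registry-server container serves gRPC on :50051, and the check must connect within 1s, which only succeeds once the catalog content has been extracted and the server is listening. A rough stand-in for that reachability check, assuming a plain TCP dial (the actual probe is likely a gRPC health check, so this is an approximation, not the real probe binary):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    // probe dials addr with the 1s budget seen in the log and returns an
    // error shaped like the "timeout: failed to connect service" output.
    func probe(addr string, timeout time.Duration) error {
        conn, err := net.DialTimeout("tcp", addr, timeout)
        if err != nil {
            return fmt.Errorf("timeout: failed to connect service %q within %v", addr, timeout)
        }
        return conn.Close()
    }

    func main() {
        if err := probe("localhost:50051", time.Second); err != nil {
            fmt.Println("Probe failed:", err) // the failures at 08:42:44-08:43:04
        } else {
            fmt.Println("probe ok") // the "started"/"ready" flip at 08:43:13
        }
    }
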
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.284606 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.399014 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content\") pod \"a8918892-d221-4cd7-846a-2d49ea93b403\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") "
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.399141 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities\") pod \"a8918892-d221-4cd7-846a-2d49ea93b403\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") "
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.399241 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrctg\" (UniqueName: \"kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg\") pod \"a8918892-d221-4cd7-846a-2d49ea93b403\" (UID: \"a8918892-d221-4cd7-846a-2d49ea93b403\") "
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.401472 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities" (OuterVolumeSpecName: "utilities") pod "a8918892-d221-4cd7-846a-2d49ea93b403" (UID: "a8918892-d221-4cd7-846a-2d49ea93b403"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.411164 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg" (OuterVolumeSpecName: "kube-api-access-hrctg") pod "a8918892-d221-4cd7-846a-2d49ea93b403" (UID: "a8918892-d221-4cd7-846a-2d49ea93b403"). InnerVolumeSpecName "kube-api-access-hrctg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.480484 4664 generic.go:334] "Generic (PLEG): container finished" podID="a8918892-d221-4cd7-846a-2d49ea93b403" containerID="80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6" exitCode=0
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.480548 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerDied","Data":"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"}
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.480582 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-95cqr" event={"ID":"a8918892-d221-4cd7-846a-2d49ea93b403","Type":"ContainerDied","Data":"ede05a0437e553629c1ab4a0dcf77d1535c18e2b307601da999aca379fa2bdfc"}
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.480605 4664 scope.go:117] "RemoveContainer" containerID="80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.481378 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-95cqr"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.501518 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-utilities\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.501551 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrctg\" (UniqueName: \"kubernetes.io/projected/a8918892-d221-4cd7-846a-2d49ea93b403-kube-api-access-hrctg\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.512903 4664 scope.go:117] "RemoveContainer" containerID="0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.553237 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a8918892-d221-4cd7-846a-2d49ea93b403" (UID: "a8918892-d221-4cd7-846a-2d49ea93b403"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.554654 4664 scope.go:117] "RemoveContainer" containerID="a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.598677 4664 scope.go:117] "RemoveContainer" containerID="80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"
Oct 13 08:43:15 crc kubenswrapper[4664]: E1013 08:43:15.599237 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6\": container with ID starting with 80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6 not found: ID does not exist" containerID="80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.599277 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6"} err="failed to get container status \"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6\": rpc error: code = NotFound desc = could not find container \"80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6\": container with ID starting with 80bfbbdbd7c61bab7fdd25c7bb3f5e9df3e4884909301c85f798eccd9c4201a6 not found: ID does not exist"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.599301 4664 scope.go:117] "RemoveContainer" containerID="0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"
Oct 13 08:43:15 crc kubenswrapper[4664]: E1013 08:43:15.599661 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860\": container with ID starting with 0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860 not found: ID does not exist" containerID="0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.599706 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860"} err="failed to get container status \"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860\": rpc error: code = NotFound desc = could not find container \"0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860\": container with ID starting with 0497293f9675a1ee1d1ef75be29f60885be6b77dd9aad13b03b0f038a1af7860 not found: ID does not exist"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.599720 4664 scope.go:117] "RemoveContainer" containerID="a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"
Oct 13 08:43:15 crc kubenswrapper[4664]: E1013 08:43:15.600040 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5\": container with ID starting with a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5 not found: ID does not exist" containerID="a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.600062 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5"} err="failed to get container status \"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5\": rpc error: code = NotFound desc = could not find container \"a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5\": container with ID starting with a83f59a7df5f8f0fed3b1af51d877eadb48f0cb2871b338d8aa8c0b0ab44f6d5 not found: ID does not exist"
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.603357 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8918892-d221-4cd7-846a-2d49ea93b403-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.828825 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"]
Oct 13 08:43:15 crc kubenswrapper[4664]: I1013 08:43:15.838383 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-95cqr"]
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.466181 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:16 crc kubenswrapper[4664]: E1013 08:43:16.469965 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="extract-content"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.470016 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="extract-content"
Oct 13 08:43:16 crc kubenswrapper[4664]: E1013 08:43:16.470041 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.470075 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server"
Oct 13 08:43:16 crc kubenswrapper[4664]: E1013 08:43:16.470137 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="extract-utilities"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.470150 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="extract-utilities"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.470579 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" containerName="registry-server"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.473089 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.485241 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.622492 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.622633 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc65t\" (UniqueName: \"kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.622870 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.725456 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc65t\" (UniqueName: \"kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.725939 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.726067 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.726687 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.726702 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.756401 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc65t\" (UniqueName: \"kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t\") pod \"redhat-marketplace-9g8v6\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") " pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:16 crc kubenswrapper[4664]: I1013 08:43:16.796079 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:17 crc kubenswrapper[4664]: I1013 08:43:17.076376 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8918892-d221-4cd7-846a-2d49ea93b403" path="/var/lib/kubelet/pods/a8918892-d221-4cd7-846a-2d49ea93b403/volumes"
Oct 13 08:43:17 crc kubenswrapper[4664]: I1013 08:43:17.350400 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:17 crc kubenswrapper[4664]: I1013 08:43:17.508325 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerStarted","Data":"42e2e4f49e7fe81600d77f759d251b39631414d6352373eb7ccfa2043b58d3bf"}
Oct 13 08:43:18 crc kubenswrapper[4664]: I1013 08:43:18.520446 4664 generic.go:334] "Generic (PLEG): container finished" podID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerID="939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091" exitCode=0
Oct 13 08:43:18 crc kubenswrapper[4664]: I1013 08:43:18.520894 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerDied","Data":"939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091"}
Oct 13 08:43:20 crc kubenswrapper[4664]: I1013 08:43:20.546386 4664 generic.go:334] "Generic (PLEG): container finished" podID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerID="f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3" exitCode=0
Oct 13 08:43:20 crc kubenswrapper[4664]: I1013 08:43:20.546439 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerDied","Data":"f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3"}
Oct 13 08:43:21 crc kubenswrapper[4664]: I1013 08:43:21.559467 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerStarted","Data":"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"}
Oct 13 08:43:21 crc kubenswrapper[4664]: I1013 08:43:21.594673 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9g8v6" podStartSLOduration=3.095866013 podStartE2EDuration="5.594652479s" podCreationTimestamp="2025-10-13 08:43:16 +0000 UTC" firstStartedPulling="2025-10-13 08:43:18.523285088 +0000 UTC m=+7006.210730290" lastFinishedPulling="2025-10-13 08:43:21.022071564 +0000 UTC m=+7008.709516756" observedRunningTime="2025-10-13 08:43:21.58576334 +0000 UTC m=+7009.273208562" watchObservedRunningTime="2025-10-13 08:43:21.594652479 +0000 UTC m=+7009.282097681"
Oct 13 08:43:24 crc kubenswrapper[4664]: I1013 08:43:24.048011 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"
Oct 13 08:43:24 crc kubenswrapper[4664]: E1013 08:43:24.048586 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb"
Oct 13 08:43:26 crc kubenswrapper[4664]: I1013 08:43:26.797239 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:26 crc kubenswrapper[4664]: I1013 08:43:26.797658 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:26 crc kubenswrapper[4664]: I1013 08:43:26.865086 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:27 crc kubenswrapper[4664]: I1013 08:43:27.695018 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:27 crc kubenswrapper[4664]: I1013 08:43:27.765295 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:29 crc kubenswrapper[4664]: I1013 08:43:29.663168 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9g8v6" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="registry-server" containerID="cri-o://25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9" gracePeriod=2
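
The pod_startup_latency_tracker entries encode a simple relationship worth spelling out: podStartSLOduration is the end-to-end startup duration minus the time spent pulling images. For redhat-marketplace-9g8v6, using the monotonic m=+ offsets above: 5.594652479 - (7008.709516756 - 7006.210730290) = 3.095866013, exactly the logged SLO value. A check of that arithmetic (numbers copied from the log; the tracker's real bookkeeping lives in pod_startup_latency_tracker.go):

    package main

    import "fmt"

    func main() {
        // Monotonic offsets in seconds, from the redhat-marketplace-9g8v6 entry.
        const (
            e2e        = 5.594652479    // podStartE2EDuration
            pullStart  = 7006.210730290 // firstStartedPulling m=+
            pullFinish = 7008.709516756 // lastFinishedPulling m=+
        )
        fmt.Printf("podStartSLOduration = %.9f s\n", e2e-(pullFinish-pullStart)) // 3.095866013
    }
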
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.126924 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.277137 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content\") pod \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") "
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.277206 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities\") pod \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") "
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.277304 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc65t\" (UniqueName: \"kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t\") pod \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\" (UID: \"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3\") "
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.278294 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities" (OuterVolumeSpecName: "utilities") pod "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" (UID: "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.283415 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t" (OuterVolumeSpecName: "kube-api-access-jc65t") pod "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" (UID: "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3"). InnerVolumeSpecName "kube-api-access-jc65t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.290764 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" (UID: "3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.379571 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc65t\" (UniqueName: \"kubernetes.io/projected/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-kube-api-access-jc65t\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.379853 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.379949 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3-utilities\") on node \"crc\" DevicePath \"\""
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.680638 4664 generic.go:334] "Generic (PLEG): container finished" podID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerID="25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9" exitCode=0
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.680924 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerDied","Data":"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"}
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.681104 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9g8v6"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.681127 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9g8v6" event={"ID":"3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3","Type":"ContainerDied","Data":"42e2e4f49e7fe81600d77f759d251b39631414d6352373eb7ccfa2043b58d3bf"}
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.681172 4664 scope.go:117] "RemoveContainer" containerID="25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.739551 4664 scope.go:117] "RemoveContainer" containerID="f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.747014 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.760499 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9g8v6"]
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.767912 4664 scope.go:117] "RemoveContainer" containerID="939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.819465 4664 scope.go:117] "RemoveContainer" containerID="25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"
Oct 13 08:43:30 crc kubenswrapper[4664]: E1013 08:43:30.819892 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9\": container with ID starting with 25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9 not found: ID does not exist" containerID="25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.819999 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9"} err="failed to get container status \"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9\": rpc error: code = NotFound desc = could not find container \"25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9\": container with ID starting with 25ee4fd436265d9cb14db37f8301539f24065bc0f12120741d2e2f9f4cba76c9 not found: ID does not exist"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.820097 4664 scope.go:117] "RemoveContainer" containerID="f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3"
Oct 13 08:43:30 crc kubenswrapper[4664]: E1013 08:43:30.820571 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3\": container with ID starting with f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3 not found: ID does not exist" containerID="f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.820651 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3"} err="failed to get container status \"f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3\": rpc error: code = NotFound desc = could not find container \"f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3\": container with ID starting with f87f6710eaa4ad1d753736b17a0cda9f0ebe8ce810a78333d1292d733c5bfcd3 not found: ID does not exist"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.820714 4664 scope.go:117] "RemoveContainer" containerID="939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091"
Oct 13 08:43:30 crc kubenswrapper[4664]: E1013 08:43:30.821086 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091\": container with ID starting with 939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091 not found: ID does not exist" containerID="939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091"
Oct 13 08:43:30 crc kubenswrapper[4664]: I1013 08:43:30.821154 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091"} err="failed to get container status \"939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091\": rpc error: code = NotFound desc = could not find container \"939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091\": container with ID starting with 939a728443b9c8dc8b9e18fdac6b31e50098cfcea2a84ae51c0885da9798c091 not found: ID does not exist"
Oct 13 08:43:31 crc kubenswrapper[4664]: I1013 08:43:31.062608 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" path="/var/lib/kubelet/pods/3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3/volumes"
Oct 13 08:43:37 crc kubenswrapper[4664]: I1013 08:43:37.046588 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad"
Oct 13 08:43:37 crc kubenswrapper[4664]: I1013 08:43:37.777324 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765"}
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.345001 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"]
Oct 13 08:45:00 crc kubenswrapper[4664]: E1013 08:45:00.346036 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="registry-server"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.346054 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="registry-server"
Oct 13 08:45:00 crc kubenswrapper[4664]: E1013 08:45:00.346072 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="extract-content"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.346079 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="extract-content"
Oct 13 08:45:00 crc kubenswrapper[4664]: E1013 08:45:00.346124 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="extract-utilities"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.346132 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="extract-utilities"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.346369 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cb8452c-5f10-491e-9a8c-6fd8a94cfcb3" containerName="registry-server"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.347195 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.365095 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"]
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.401779 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.401937 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.525871 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.526322 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.526549 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-652bd\" (UniqueName: \"kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.628267 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.628412 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-652bd\" (UniqueName: \"kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.628447 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.629698 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.651462 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-652bd\" (UniqueName: \"kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.651629 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume\") pod \"collect-profiles-29339085-8447f\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:00 crc kubenswrapper[4664]: I1013 08:45:00.696592 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:01 crc kubenswrapper[4664]: I1013 08:45:01.194410 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"]
Oct 13 08:45:01 crc kubenswrapper[4664]: I1013 08:45:01.602387 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f" event={"ID":"3ca8a96f-296c-4c3a-be7d-79deed75aa63","Type":"ContainerStarted","Data":"2cf81e39e194fd8a192daf8361c65c5ad91d59cbe253d07d44f32883484b70aa"}
Oct 13 08:45:01 crc kubenswrapper[4664]: I1013 08:45:01.602678 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f" event={"ID":"3ca8a96f-296c-4c3a-be7d-79deed75aa63","Type":"ContainerStarted","Data":"fcd59169bb00dcbd78f0f7554eee592d1be3738044e2d618ba53b6e7979d6a25"}
Oct 13 08:45:01 crc kubenswrapper[4664]: I1013 08:45:01.623090 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f" podStartSLOduration=1.6230698239999999 podStartE2EDuration="1.623069824s" podCreationTimestamp="2025-10-13 08:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 08:45:01.622517259 +0000 UTC m=+7109.309962461" watchObservedRunningTime="2025-10-13 08:45:01.623069824 +0000 UTC m=+7109.310515016"
Oct 13 08:45:02 crc kubenswrapper[4664]: I1013 08:45:02.613701 4664 generic.go:334] "Generic (PLEG): container finished" podID="3ca8a96f-296c-4c3a-be7d-79deed75aa63" containerID="2cf81e39e194fd8a192daf8361c65c5ad91d59cbe253d07d44f32883484b70aa" exitCode=0
Oct 13 08:45:02 crc kubenswrapper[4664]: I1013 08:45:02.613748 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f" event={"ID":"3ca8a96f-296c-4c3a-be7d-79deed75aa63","Type":"ContainerDied","Data":"2cf81e39e194fd8a192daf8361c65c5ad91d59cbe253d07d44f32883484b70aa"}
Oct 13 08:45:03 crc kubenswrapper[4664]: I1013 08:45:03.997054 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.194699 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume\") pod \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") "
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.194957 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume\") pod \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") "
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.194987 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-652bd\" (UniqueName: \"kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd\") pod \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\" (UID: \"3ca8a96f-296c-4c3a-be7d-79deed75aa63\") "
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.196525 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume" (OuterVolumeSpecName: "config-volume") pod "3ca8a96f-296c-4c3a-be7d-79deed75aa63" (UID: "3ca8a96f-296c-4c3a-be7d-79deed75aa63"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.205042 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3ca8a96f-296c-4c3a-be7d-79deed75aa63" (UID: "3ca8a96f-296c-4c3a-be7d-79deed75aa63"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.213276 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd" (OuterVolumeSpecName: "kube-api-access-652bd") pod "3ca8a96f-296c-4c3a-be7d-79deed75aa63" (UID: "3ca8a96f-296c-4c3a-be7d-79deed75aa63"). InnerVolumeSpecName "kube-api-access-652bd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.296965 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca8a96f-296c-4c3a-be7d-79deed75aa63-config-volume\") on node \"crc\" DevicePath \"\""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.296999 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-652bd\" (UniqueName: \"kubernetes.io/projected/3ca8a96f-296c-4c3a-be7d-79deed75aa63-kube-api-access-652bd\") on node \"crc\" DevicePath \"\""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.297010 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ca8a96f-296c-4c3a-be7d-79deed75aa63-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.636266 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f" event={"ID":"3ca8a96f-296c-4c3a-be7d-79deed75aa63","Type":"ContainerDied","Data":"fcd59169bb00dcbd78f0f7554eee592d1be3738044e2d618ba53b6e7979d6a25"}
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.636311 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcd59169bb00dcbd78f0f7554eee592d1be3738044e2d618ba53b6e7979d6a25"
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.636323 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339085-8447f"
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.726169 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6"]
Oct 13 08:45:04 crc kubenswrapper[4664]: I1013 08:45:04.733883 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339040-r5mn6"]
Oct 13 08:45:05 crc kubenswrapper[4664]: I1013 08:45:05.065001 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13feaed1-f47c-4636-b03a-ce11cdb2ae90" path="/var/lib/kubelet/pods/13feaed1-f47c-4636-b03a-ce11cdb2ae90/volumes"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.830403 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g5wlg"]
Oct 13 08:45:22 crc kubenswrapper[4664]: E1013 08:45:22.831105 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ca8a96f-296c-4c3a-be7d-79deed75aa63" containerName="collect-profiles"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.831116 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ca8a96f-296c-4c3a-be7d-79deed75aa63" containerName="collect-profiles"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.831319 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ca8a96f-296c-4c3a-be7d-79deed75aa63" containerName="collect-profiles"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.832586 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.848050 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5wlg"]
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.981380 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2b6b\" (UniqueName: \"kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.981671 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:22 crc kubenswrapper[4664]: I1013 08:45:22.981861 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.083533 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2b6b\" (UniqueName: \"kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.083608 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.083704 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.084165 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.084293 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.102500 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2b6b\" (UniqueName: \"kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b\") pod \"community-operators-g5wlg\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") " pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.165525 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.732740 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5wlg"]
Oct 13 08:45:23 crc kubenswrapper[4664]: W1013 08:45:23.746642 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9427065_767d_4977_bf9f_524dcc4e1a20.slice/crio-3e366184b556f9bbb0a646d88f867016b0ea88b032ac17cf84a43dcdf349e3ea WatchSource:0}: Error finding container 3e366184b556f9bbb0a646d88f867016b0ea88b032ac17cf84a43dcdf349e3ea: Status 404 returned error can't find the container with id 3e366184b556f9bbb0a646d88f867016b0ea88b032ac17cf84a43dcdf349e3ea
Oct 13 08:45:23 crc kubenswrapper[4664]: I1013 08:45:23.868548 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerStarted","Data":"3e366184b556f9bbb0a646d88f867016b0ea88b032ac17cf84a43dcdf349e3ea"}
Oct 13 08:45:24 crc kubenswrapper[4664]: I1013 08:45:24.883221 4664 generic.go:334] "Generic (PLEG): container finished" podID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerID="063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31" exitCode=0
Oct 13 08:45:24 crc kubenswrapper[4664]: I1013 08:45:24.883727 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerDied","Data":"063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31"}
Oct 13 08:45:26 crc kubenswrapper[4664]: I1013 08:45:26.913410 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerStarted","Data":"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70"}
Oct 13 08:45:27 crc kubenswrapper[4664]: I1013 08:45:27.930960 4664 generic.go:334] "Generic (PLEG): container finished" podID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerID="20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70" exitCode=0
Oct 13 08:45:27 crc kubenswrapper[4664]: I1013 08:45:27.931074 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerDied","Data":"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70"}
Oct 13 08:45:28 crc kubenswrapper[4664]: I1013 08:45:28.943604 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerStarted","Data":"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc"}
Oct 13 08:45:28 crc kubenswrapper[4664]: I1013 08:45:28.964586 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g5wlg" podStartSLOduration=3.240819878 podStartE2EDuration="6.964565245s" podCreationTimestamp="2025-10-13 08:45:22 +0000 UTC" firstStartedPulling="2025-10-13 08:45:24.887379317 +0000 UTC m=+7132.574824529" lastFinishedPulling="2025-10-13 08:45:28.611124684 +0000 UTC m=+7136.298569896" observedRunningTime="2025-10-13 08:45:28.964091432 +0000 UTC m=+7136.651536634" watchObservedRunningTime="2025-10-13 08:45:28.964565245 +0000 UTC m=+7136.652010447"
Oct 13 08:45:33 crc kubenswrapper[4664]: I1013 08:45:33.165676 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:33 crc kubenswrapper[4664]: I1013 08:45:33.166306 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:34 crc kubenswrapper[4664]: I1013 08:45:34.226572 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-g5wlg" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="registry-server" probeResult="failure" output=<
Oct 13 08:45:34 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s
Oct 13 08:45:34 crc kubenswrapper[4664]: >
Oct 13 08:45:43 crc kubenswrapper[4664]: I1013 08:45:43.222434 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:43 crc kubenswrapper[4664]: I1013 08:45:43.298978 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:43 crc kubenswrapper[4664]: I1013 08:45:43.469735 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5wlg"]
Oct 13 08:45:43 crc kubenswrapper[4664]: I1013 08:45:43.827543 4664 scope.go:117] "RemoveContainer" containerID="a11ca31b840fe4681397382dd25aa869fb1d325cf4a911b9a175a8cfa5176004"
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.097584 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g5wlg" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="registry-server" containerID="cri-o://bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc" gracePeriod=2
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.609740 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5wlg"
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.751473 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities\") pod \"f9427065-767d-4977-bf9f-524dcc4e1a20\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") "
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.751526 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content\") pod \"f9427065-767d-4977-bf9f-524dcc4e1a20\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") "
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.751557 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2b6b\" (UniqueName: \"kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b\") pod \"f9427065-767d-4977-bf9f-524dcc4e1a20\" (UID: \"f9427065-767d-4977-bf9f-524dcc4e1a20\") "
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.753210 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities" (OuterVolumeSpecName: "utilities") pod "f9427065-767d-4977-bf9f-524dcc4e1a20" (UID: "f9427065-767d-4977-bf9f-524dcc4e1a20"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.770199 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b" (OuterVolumeSpecName: "kube-api-access-h2b6b") pod "f9427065-767d-4977-bf9f-524dcc4e1a20" (UID: "f9427065-767d-4977-bf9f-524dcc4e1a20"). InnerVolumeSpecName "kube-api-access-h2b6b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.819538 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9427065-767d-4977-bf9f-524dcc4e1a20" (UID: "f9427065-767d-4977-bf9f-524dcc4e1a20"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.854169 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.854214 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9427065-767d-4977-bf9f-524dcc4e1a20-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:45:45 crc kubenswrapper[4664]: I1013 08:45:45.854231 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2b6b\" (UniqueName: \"kubernetes.io/projected/f9427065-767d-4977-bf9f-524dcc4e1a20-kube-api-access-h2b6b\") on node \"crc\" DevicePath \"\"" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.109280 4664 generic.go:334] "Generic (PLEG): container finished" podID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerID="bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc" exitCode=0 Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.109322 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerDied","Data":"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc"} Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.109346 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5wlg" event={"ID":"f9427065-767d-4977-bf9f-524dcc4e1a20","Type":"ContainerDied","Data":"3e366184b556f9bbb0a646d88f867016b0ea88b032ac17cf84a43dcdf349e3ea"} Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.109363 4664 scope.go:117] "RemoveContainer" containerID="bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.109479 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g5wlg" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.133035 4664 scope.go:117] "RemoveContainer" containerID="20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.141083 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5wlg"] Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.150518 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g5wlg"] Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.184570 4664 scope.go:117] "RemoveContainer" containerID="063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.210832 4664 scope.go:117] "RemoveContainer" containerID="bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc" Oct 13 08:45:46 crc kubenswrapper[4664]: E1013 08:45:46.212006 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc\": container with ID starting with bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc not found: ID does not exist" containerID="bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.212040 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc"} err="failed to get container status \"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc\": rpc error: code = NotFound desc = could not find container \"bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc\": container with ID starting with bee97d9bd7222113e6e808c5976fa9f8e2d8a0406cc5315e317a5296db5601dc not found: ID does not exist" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.212061 4664 scope.go:117] "RemoveContainer" containerID="20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70" Oct 13 08:45:46 crc kubenswrapper[4664]: E1013 08:45:46.212313 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70\": container with ID starting with 20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70 not found: ID does not exist" containerID="20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.212346 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70"} err="failed to get container status \"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70\": rpc error: code = NotFound desc = could not find container \"20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70\": container with ID starting with 20bf11fbfe2b5f0bfaa126e198dbd1f9b21129c9ca6d814346ce8749ee291c70 not found: ID does not exist" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.212362 4664 scope.go:117] "RemoveContainer" containerID="063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31" Oct 13 08:45:46 crc kubenswrapper[4664]: E1013 08:45:46.212603 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31\": container with ID starting with 063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31 not found: ID does not exist" containerID="063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31" Oct 13 08:45:46 crc kubenswrapper[4664]: I1013 08:45:46.212635 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31"} err="failed to get container status \"063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31\": rpc error: code = NotFound desc = could not find container \"063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31\": container with ID starting with 063c08925137813ef74338d4164bff524f8712d30ff59c1b06136331b49e3c31 not found: ID does not exist" Oct 13 08:45:47 crc kubenswrapper[4664]: I1013 08:45:47.058434 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" path="/var/lib/kubelet/pods/f9427065-767d-4977-bf9f-524dcc4e1a20/volumes" Oct 13 08:45:58 crc kubenswrapper[4664]: I1013 08:45:58.811750 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:45:58 crc kubenswrapper[4664]: I1013 08:45:58.812293 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:46:28 crc kubenswrapper[4664]: I1013 08:46:28.811704 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:46:28 crc kubenswrapper[4664]: I1013 08:46:28.812373 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.812640 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.813455 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.813531 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.814843 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.814943 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765" gracePeriod=600 Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.954631 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765" exitCode=0 Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.954690 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765"} Oct 13 08:46:58 crc kubenswrapper[4664]: I1013 08:46:58.955079 4664 scope.go:117] "RemoveContainer" containerID="1751467dd0a02eb432ae3e8f43041a158ebb903b976819f5f09e57bf74e4d5ad" Oct 13 08:46:59 crc kubenswrapper[4664]: I1013 08:46:59.965701 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97"} Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.526261 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:26 crc kubenswrapper[4664]: E1013 08:47:26.528504 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="extract-content" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.528598 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="extract-content" Oct 13 08:47:26 crc kubenswrapper[4664]: E1013 08:47:26.528687 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="registry-server" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.528760 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="registry-server" Oct 13 08:47:26 crc kubenswrapper[4664]: E1013 08:47:26.528896 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="extract-utilities" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.529639 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="extract-utilities" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.530036 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f9427065-767d-4977-bf9f-524dcc4e1a20" containerName="registry-server" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.531732 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.546279 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.623994 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.624046 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.624296 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s789f\" (UniqueName: \"kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.726013 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s789f\" (UniqueName: \"kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.726171 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.726196 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.726697 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.726971 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities\") pod \"certified-operators-bjwdk\" (UID: 
\"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.747414 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s789f\" (UniqueName: \"kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f\") pod \"certified-operators-bjwdk\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:26 crc kubenswrapper[4664]: I1013 08:47:26.855069 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:27 crc kubenswrapper[4664]: I1013 08:47:27.253025 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:28 crc kubenswrapper[4664]: I1013 08:47:28.264702 4664 generic.go:334] "Generic (PLEG): container finished" podID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerID="64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb" exitCode=0 Oct 13 08:47:28 crc kubenswrapper[4664]: I1013 08:47:28.264769 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerDied","Data":"64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb"} Oct 13 08:47:28 crc kubenswrapper[4664]: I1013 08:47:28.265036 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerStarted","Data":"03761b6b35c90addc4c5c69a4bc611a21ff6bab1a987f608dc3de80e2577c81e"} Oct 13 08:47:29 crc kubenswrapper[4664]: I1013 08:47:29.278888 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerStarted","Data":"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30"} Oct 13 08:47:31 crc kubenswrapper[4664]: I1013 08:47:31.301753 4664 generic.go:334] "Generic (PLEG): container finished" podID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerID="fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30" exitCode=0 Oct 13 08:47:31 crc kubenswrapper[4664]: I1013 08:47:31.301845 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerDied","Data":"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30"} Oct 13 08:47:32 crc kubenswrapper[4664]: I1013 08:47:32.332261 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerStarted","Data":"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46"} Oct 13 08:47:32 crc kubenswrapper[4664]: I1013 08:47:32.356625 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bjwdk" podStartSLOduration=2.750509692 podStartE2EDuration="6.356604448s" podCreationTimestamp="2025-10-13 08:47:26 +0000 UTC" firstStartedPulling="2025-10-13 08:47:28.267029477 +0000 UTC m=+7255.954474669" lastFinishedPulling="2025-10-13 08:47:31.873124203 +0000 UTC m=+7259.560569425" observedRunningTime="2025-10-13 08:47:32.353286279 +0000 UTC m=+7260.040731501" 
watchObservedRunningTime="2025-10-13 08:47:32.356604448 +0000 UTC m=+7260.044049640" Oct 13 08:47:36 crc kubenswrapper[4664]: I1013 08:47:36.855576 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:36 crc kubenswrapper[4664]: I1013 08:47:36.856067 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:37 crc kubenswrapper[4664]: I1013 08:47:37.916666 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-bjwdk" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="registry-server" probeResult="failure" output=< Oct 13 08:47:37 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:47:37 crc kubenswrapper[4664]: > Oct 13 08:47:46 crc kubenswrapper[4664]: I1013 08:47:46.916601 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:46 crc kubenswrapper[4664]: I1013 08:47:46.986225 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:47 crc kubenswrapper[4664]: I1013 08:47:47.156686 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:48 crc kubenswrapper[4664]: I1013 08:47:48.524093 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bjwdk" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="registry-server" containerID="cri-o://86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46" gracePeriod=2 Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.374601 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.423853 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s789f\" (UniqueName: \"kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f\") pod \"a129cb61-2a49-4592-9b45-4dcab654eb71\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.424018 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content\") pod \"a129cb61-2a49-4592-9b45-4dcab654eb71\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.424044 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities\") pod \"a129cb61-2a49-4592-9b45-4dcab654eb71\" (UID: \"a129cb61-2a49-4592-9b45-4dcab654eb71\") " Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.425409 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities" (OuterVolumeSpecName: "utilities") pod "a129cb61-2a49-4592-9b45-4dcab654eb71" (UID: "a129cb61-2a49-4592-9b45-4dcab654eb71"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.427894 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.439868 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f" (OuterVolumeSpecName: "kube-api-access-s789f") pod "a129cb61-2a49-4592-9b45-4dcab654eb71" (UID: "a129cb61-2a49-4592-9b45-4dcab654eb71"). InnerVolumeSpecName "kube-api-access-s789f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.474815 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a129cb61-2a49-4592-9b45-4dcab654eb71" (UID: "a129cb61-2a49-4592-9b45-4dcab654eb71"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.531179 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s789f\" (UniqueName: \"kubernetes.io/projected/a129cb61-2a49-4592-9b45-4dcab654eb71-kube-api-access-s789f\") on node \"crc\" DevicePath \"\"" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.531234 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a129cb61-2a49-4592-9b45-4dcab654eb71-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.536308 4664 generic.go:334] "Generic (PLEG): container finished" podID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerID="86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46" exitCode=0 Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.536368 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerDied","Data":"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46"} Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.536429 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjwdk" event={"ID":"a129cb61-2a49-4592-9b45-4dcab654eb71","Type":"ContainerDied","Data":"03761b6b35c90addc4c5c69a4bc611a21ff6bab1a987f608dc3de80e2577c81e"} Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.536452 4664 scope.go:117] "RemoveContainer" containerID="86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.536621 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bjwdk" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.576920 4664 scope.go:117] "RemoveContainer" containerID="fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.584413 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.594179 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bjwdk"] Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.617206 4664 scope.go:117] "RemoveContainer" containerID="64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.651769 4664 scope.go:117] "RemoveContainer" containerID="86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46" Oct 13 08:47:49 crc kubenswrapper[4664]: E1013 08:47:49.652162 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46\": container with ID starting with 86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46 not found: ID does not exist" containerID="86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.652191 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46"} err="failed to get container status \"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46\": rpc error: code = NotFound desc = could not find container \"86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46\": container with ID starting with 86fc8b3a0b93710ed325ac61dfbe10504b3a08e5acd5778852efee92bb6efb46 not found: ID does not exist" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.652212 4664 scope.go:117] "RemoveContainer" containerID="fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30" Oct 13 08:47:49 crc kubenswrapper[4664]: E1013 08:47:49.652542 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30\": container with ID starting with fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30 not found: ID does not exist" containerID="fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.652580 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30"} err="failed to get container status \"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30\": rpc error: code = NotFound desc = could not find container \"fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30\": container with ID starting with fa7190ddf26b744d5d74e9d48077ad005c8a1034ec8fcebbe3554e03e962ff30 not found: ID does not exist" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.652605 4664 scope.go:117] "RemoveContainer" containerID="64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb" Oct 13 08:47:49 crc kubenswrapper[4664]: E1013 08:47:49.653254 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb\": container with ID starting with 64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb not found: ID does not exist" containerID="64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb" Oct 13 08:47:49 crc kubenswrapper[4664]: I1013 08:47:49.653280 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb"} err="failed to get container status \"64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb\": rpc error: code = NotFound desc = could not find container \"64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb\": container with ID starting with 64639501d2bf5ef518f7c738c68179d2845e8637bd47365453afdaf34dbad3cb not found: ID does not exist" Oct 13 08:47:51 crc kubenswrapper[4664]: I1013 08:47:51.058988 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" path="/var/lib/kubelet/pods/a129cb61-2a49-4592-9b45-4dcab654eb71/volumes" Oct 13 08:49:28 crc kubenswrapper[4664]: I1013 08:49:28.811615 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:49:28 crc kubenswrapper[4664]: I1013 08:49:28.812333 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:49:58 crc kubenswrapper[4664]: I1013 08:49:58.812630 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:49:58 crc kubenswrapper[4664]: I1013 08:49:58.813350 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:50:28 crc kubenswrapper[4664]: I1013 08:50:28.812150 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:50:28 crc kubenswrapper[4664]: I1013 08:50:28.812730 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:50:28 crc kubenswrapper[4664]: I1013 08:50:28.812786 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:50:28 crc kubenswrapper[4664]: I1013 08:50:28.813413 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:50:28 crc kubenswrapper[4664]: I1013 08:50:28.813482 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" gracePeriod=600 Oct 13 08:50:28 crc kubenswrapper[4664]: E1013 08:50:28.930717 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:50:29 crc kubenswrapper[4664]: I1013 08:50:29.299514 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" exitCode=0 Oct 13 08:50:29 crc kubenswrapper[4664]: I1013 08:50:29.299575 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97"} Oct 13 08:50:29 crc kubenswrapper[4664]: I1013 08:50:29.299610 4664 scope.go:117] "RemoveContainer" containerID="866aea13c211f78eaa8e4d726e6114b7e27a73e61908a804ea8d0c909dc1a765" Oct 13 08:50:29 crc kubenswrapper[4664]: I1013 08:50:29.300358 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:50:29 crc kubenswrapper[4664]: E1013 08:50:29.300691 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:50:40 crc kubenswrapper[4664]: I1013 08:50:40.047641 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:50:40 crc kubenswrapper[4664]: E1013 08:50:40.048594 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:50:55 crc 
kubenswrapper[4664]: I1013 08:50:55.047774 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:50:55 crc kubenswrapper[4664]: E1013 08:50:55.048519 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:51:08 crc kubenswrapper[4664]: I1013 08:51:08.047024 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:51:08 crc kubenswrapper[4664]: E1013 08:51:08.047714 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:51:21 crc kubenswrapper[4664]: I1013 08:51:21.047822 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:51:21 crc kubenswrapper[4664]: E1013 08:51:21.048769 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:51:32 crc kubenswrapper[4664]: I1013 08:51:32.047856 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:51:32 crc kubenswrapper[4664]: E1013 08:51:32.048849 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:51:43 crc kubenswrapper[4664]: I1013 08:51:43.060566 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:51:43 crc kubenswrapper[4664]: E1013 08:51:43.062562 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:51:58 crc kubenswrapper[4664]: I1013 08:51:58.047008 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:51:58 crc 
kubenswrapper[4664]: E1013 08:51:58.047828 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:52:12 crc kubenswrapper[4664]: I1013 08:52:12.047562 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:52:12 crc kubenswrapper[4664]: E1013 08:52:12.048658 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:52:23 crc kubenswrapper[4664]: I1013 08:52:23.057849 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:52:23 crc kubenswrapper[4664]: E1013 08:52:23.058549 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:52:34 crc kubenswrapper[4664]: I1013 08:52:34.046571 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:52:34 crc kubenswrapper[4664]: E1013 08:52:34.047333 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:52:47 crc kubenswrapper[4664]: I1013 08:52:47.046915 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:52:47 crc kubenswrapper[4664]: E1013 08:52:47.047605 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:01 crc kubenswrapper[4664]: I1013 08:53:01.047514 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:53:01 crc kubenswrapper[4664]: E1013 08:53:01.048971 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:14 crc kubenswrapper[4664]: I1013 08:53:14.047648 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:53:14 crc kubenswrapper[4664]: E1013 08:53:14.048846 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:28 crc kubenswrapper[4664]: I1013 08:53:28.047635 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:53:28 crc kubenswrapper[4664]: E1013 08:53:28.049761 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:40 crc kubenswrapper[4664]: I1013 08:53:40.047370 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:53:40 crc kubenswrapper[4664]: E1013 08:53:40.048218 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.127045 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:53:43 crc kubenswrapper[4664]: E1013 08:53:43.127938 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="extract-utilities" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.127959 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="extract-utilities" Oct 13 08:53:43 crc kubenswrapper[4664]: E1013 08:53:43.127999 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="registry-server" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.128010 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="registry-server" Oct 13 08:53:43 crc kubenswrapper[4664]: E1013 08:53:43.128053 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="extract-content" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.128064 4664 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="extract-content" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.128401 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="a129cb61-2a49-4592-9b45-4dcab654eb71" containerName="registry-server" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.131158 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.155655 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.158841 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.158892 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g55jw\" (UniqueName: \"kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.158915 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.259773 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.259860 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g55jw\" (UniqueName: \"kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.259882 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.260224 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.260726 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.281020 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g55jw\" (UniqueName: \"kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw\") pod \"redhat-operators-j449d\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:43 crc kubenswrapper[4664]: I1013 08:53:43.467457 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:44 crc kubenswrapper[4664]: I1013 08:53:44.329644 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:53:44 crc kubenswrapper[4664]: I1013 08:53:44.418655 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerStarted","Data":"6c09c5f05e3b4652f4017ebcef5e90d967850b03d4bc2d46d373c3278f65db75"} Oct 13 08:53:45 crc kubenswrapper[4664]: I1013 08:53:45.429022 4664 generic.go:334] "Generic (PLEG): container finished" podID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerID="1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943" exitCode=0 Oct 13 08:53:45 crc kubenswrapper[4664]: I1013 08:53:45.429234 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerDied","Data":"1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943"} Oct 13 08:53:45 crc kubenswrapper[4664]: I1013 08:53:45.432944 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:53:46 crc kubenswrapper[4664]: I1013 08:53:46.443682 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerStarted","Data":"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889"} Oct 13 08:53:50 crc kubenswrapper[4664]: I1013 08:53:50.487027 4664 generic.go:334] "Generic (PLEG): container finished" podID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerID="7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889" exitCode=0 Oct 13 08:53:50 crc kubenswrapper[4664]: I1013 08:53:50.487598 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerDied","Data":"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889"} Oct 13 08:53:51 crc kubenswrapper[4664]: I1013 08:53:51.499606 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerStarted","Data":"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31"} Oct 13 08:53:51 crc kubenswrapper[4664]: I1013 08:53:51.532760 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j449d" podStartSLOduration=3.044624565 
podStartE2EDuration="8.532738676s" podCreationTimestamp="2025-10-13 08:53:43 +0000 UTC" firstStartedPulling="2025-10-13 08:53:45.431649034 +0000 UTC m=+7633.119094226" lastFinishedPulling="2025-10-13 08:53:50.919763145 +0000 UTC m=+7638.607208337" observedRunningTime="2025-10-13 08:53:51.526468657 +0000 UTC m=+7639.213913869" watchObservedRunningTime="2025-10-13 08:53:51.532738676 +0000 UTC m=+7639.220183868" Oct 13 08:53:52 crc kubenswrapper[4664]: I1013 08:53:52.969206 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:53:52 crc kubenswrapper[4664]: I1013 08:53:52.972293 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.000572 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.060386 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:53:53 crc kubenswrapper[4664]: E1013 08:53:53.060638 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.143342 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d589p\" (UniqueName: \"kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.143679 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.143929 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.245952 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.246714 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content\") pod \"redhat-marketplace-fs2d4\" (UID: 
\"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.247588 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.248084 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.248341 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d589p\" (UniqueName: \"kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.279547 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d589p\" (UniqueName: \"kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p\") pod \"redhat-marketplace-fs2d4\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.301525 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.467570 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.468110 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:53:53 crc kubenswrapper[4664]: I1013 08:53:53.934102 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:53:54 crc kubenswrapper[4664]: I1013 08:53:54.561205 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j449d" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" probeResult="failure" output=< Oct 13 08:53:54 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:53:54 crc kubenswrapper[4664]: > Oct 13 08:53:54 crc kubenswrapper[4664]: I1013 08:53:54.562542 4664 generic.go:334] "Generic (PLEG): container finished" podID="3390e338-8414-471b-9a53-4806300cca8e" containerID="ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2" exitCode=0 Oct 13 08:53:54 crc kubenswrapper[4664]: I1013 08:53:54.562574 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerDied","Data":"ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2"} Oct 13 08:53:54 crc kubenswrapper[4664]: I1013 08:53:54.562598 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerStarted","Data":"362e7f88cbfb18eff80db901ea512b6cca78ce4eb6562f5faf03d62bcd17d0c0"} Oct 13 08:53:56 crc kubenswrapper[4664]: I1013 08:53:56.582097 4664 generic.go:334] "Generic (PLEG): container finished" podID="3390e338-8414-471b-9a53-4806300cca8e" containerID="a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af" exitCode=0 Oct 13 08:53:56 crc kubenswrapper[4664]: I1013 08:53:56.582136 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerDied","Data":"a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af"} Oct 13 08:53:57 crc kubenswrapper[4664]: I1013 08:53:57.596970 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerStarted","Data":"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585"} Oct 13 08:53:57 crc kubenswrapper[4664]: I1013 08:53:57.626472 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fs2d4" podStartSLOduration=3.047975095 podStartE2EDuration="5.626444209s" podCreationTimestamp="2025-10-13 08:53:52 +0000 UTC" firstStartedPulling="2025-10-13 08:53:54.564717327 +0000 UTC m=+7642.252162519" lastFinishedPulling="2025-10-13 08:53:57.143186441 +0000 UTC m=+7644.830631633" observedRunningTime="2025-10-13 08:53:57.617882989 +0000 UTC m=+7645.305328211" watchObservedRunningTime="2025-10-13 08:53:57.626444209 +0000 UTC m=+7645.313889441" Oct 13 08:54:03 crc kubenswrapper[4664]: I1013 08:54:03.302465 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:03 crc kubenswrapper[4664]: I1013 08:54:03.302836 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:03 crc kubenswrapper[4664]: I1013 08:54:03.368162 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:03 crc kubenswrapper[4664]: I1013 08:54:03.717923 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:03 crc kubenswrapper[4664]: I1013 08:54:03.778891 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:54:04 crc kubenswrapper[4664]: I1013 08:54:04.047413 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:54:04 crc kubenswrapper[4664]: E1013 08:54:04.047661 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:54:04 crc kubenswrapper[4664]: I1013 08:54:04.528823 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j449d" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" 
containerName="registry-server" probeResult="failure" output=< Oct 13 08:54:04 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:54:04 crc kubenswrapper[4664]: > Oct 13 08:54:05 crc kubenswrapper[4664]: I1013 08:54:05.681264 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fs2d4" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="registry-server" containerID="cri-o://4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585" gracePeriod=2 Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.191450 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.299779 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d589p\" (UniqueName: \"kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p\") pod \"3390e338-8414-471b-9a53-4806300cca8e\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.300046 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content\") pod \"3390e338-8414-471b-9a53-4806300cca8e\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.300097 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities\") pod \"3390e338-8414-471b-9a53-4806300cca8e\" (UID: \"3390e338-8414-471b-9a53-4806300cca8e\") " Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.301081 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities" (OuterVolumeSpecName: "utilities") pod "3390e338-8414-471b-9a53-4806300cca8e" (UID: "3390e338-8414-471b-9a53-4806300cca8e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.314968 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p" (OuterVolumeSpecName: "kube-api-access-d589p") pod "3390e338-8414-471b-9a53-4806300cca8e" (UID: "3390e338-8414-471b-9a53-4806300cca8e"). InnerVolumeSpecName "kube-api-access-d589p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.323397 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3390e338-8414-471b-9a53-4806300cca8e" (UID: "3390e338-8414-471b-9a53-4806300cca8e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.403141 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.403191 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3390e338-8414-471b-9a53-4806300cca8e-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.403211 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d589p\" (UniqueName: \"kubernetes.io/projected/3390e338-8414-471b-9a53-4806300cca8e-kube-api-access-d589p\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.700878 4664 generic.go:334] "Generic (PLEG): container finished" podID="3390e338-8414-471b-9a53-4806300cca8e" containerID="4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585" exitCode=0 Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.700953 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerDied","Data":"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585"} Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.701203 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fs2d4" event={"ID":"3390e338-8414-471b-9a53-4806300cca8e","Type":"ContainerDied","Data":"362e7f88cbfb18eff80db901ea512b6cca78ce4eb6562f5faf03d62bcd17d0c0"} Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.701000 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fs2d4" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.701228 4664 scope.go:117] "RemoveContainer" containerID="4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.737216 4664 scope.go:117] "RemoveContainer" containerID="a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.738154 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.749107 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fs2d4"] Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.768486 4664 scope.go:117] "RemoveContainer" containerID="ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.811433 4664 scope.go:117] "RemoveContainer" containerID="4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585" Oct 13 08:54:06 crc kubenswrapper[4664]: E1013 08:54:06.812025 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585\": container with ID starting with 4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585 not found: ID does not exist" containerID="4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.812072 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585"} err="failed to get container status \"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585\": rpc error: code = NotFound desc = could not find container \"4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585\": container with ID starting with 4fa001ffc0c282a412f32f143724e8359f3d3833cdb994a7076f10a3c790f585 not found: ID does not exist" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.812099 4664 scope.go:117] "RemoveContainer" containerID="a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af" Oct 13 08:54:06 crc kubenswrapper[4664]: E1013 08:54:06.812470 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af\": container with ID starting with a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af not found: ID does not exist" containerID="a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.812502 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af"} err="failed to get container status \"a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af\": rpc error: code = NotFound desc = could not find container \"a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af\": container with ID starting with a73817c24a1f63f6283b8ac9eb46770256d332189f4c6ebabc352709c52fe0af not found: ID does not exist" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.812523 4664 scope.go:117] "RemoveContainer" 
containerID="ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2" Oct 13 08:54:06 crc kubenswrapper[4664]: E1013 08:54:06.812916 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2\": container with ID starting with ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2 not found: ID does not exist" containerID="ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2" Oct 13 08:54:06 crc kubenswrapper[4664]: I1013 08:54:06.813008 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2"} err="failed to get container status \"ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2\": rpc error: code = NotFound desc = could not find container \"ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2\": container with ID starting with ec2e7adfd06178c9fd3b05bf6f431dcf026c1e0c52ae0c618ff9d911f24528b2 not found: ID does not exist" Oct 13 08:54:07 crc kubenswrapper[4664]: I1013 08:54:07.077037 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3390e338-8414-471b-9a53-4806300cca8e" path="/var/lib/kubelet/pods/3390e338-8414-471b-9a53-4806300cca8e/volumes" Oct 13 08:54:14 crc kubenswrapper[4664]: I1013 08:54:14.519242 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j449d" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" probeResult="failure" output=< Oct 13 08:54:14 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:54:14 crc kubenswrapper[4664]: > Oct 13 08:54:19 crc kubenswrapper[4664]: I1013 08:54:19.047832 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:54:19 crc kubenswrapper[4664]: E1013 08:54:19.048638 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:54:23 crc kubenswrapper[4664]: I1013 08:54:23.524717 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:54:23 crc kubenswrapper[4664]: I1013 08:54:23.586949 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:54:24 crc kubenswrapper[4664]: I1013 08:54:24.174377 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:54:24 crc kubenswrapper[4664]: I1013 08:54:24.885214 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j449d" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" containerID="cri-o://8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31" gracePeriod=2 Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.382539 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.490381 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities\") pod \"1cc79530-fb40-4e1a-9856-a38fcb524779\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.490536 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g55jw\" (UniqueName: \"kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw\") pod \"1cc79530-fb40-4e1a-9856-a38fcb524779\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.490688 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content\") pod \"1cc79530-fb40-4e1a-9856-a38fcb524779\" (UID: \"1cc79530-fb40-4e1a-9856-a38fcb524779\") " Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.492050 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities" (OuterVolumeSpecName: "utilities") pod "1cc79530-fb40-4e1a-9856-a38fcb524779" (UID: "1cc79530-fb40-4e1a-9856-a38fcb524779"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.513125 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw" (OuterVolumeSpecName: "kube-api-access-g55jw") pod "1cc79530-fb40-4e1a-9856-a38fcb524779" (UID: "1cc79530-fb40-4e1a-9856-a38fcb524779"). InnerVolumeSpecName "kube-api-access-g55jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.576999 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1cc79530-fb40-4e1a-9856-a38fcb524779" (UID: "1cc79530-fb40-4e1a-9856-a38fcb524779"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.594943 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.594980 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g55jw\" (UniqueName: \"kubernetes.io/projected/1cc79530-fb40-4e1a-9856-a38fcb524779-kube-api-access-g55jw\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.594993 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cc79530-fb40-4e1a-9856-a38fcb524779-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.899929 4664 generic.go:334] "Generic (PLEG): container finished" podID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerID="8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31" exitCode=0 Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.899972 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerDied","Data":"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31"} Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.900008 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j449d" event={"ID":"1cc79530-fb40-4e1a-9856-a38fcb524779","Type":"ContainerDied","Data":"6c09c5f05e3b4652f4017ebcef5e90d967850b03d4bc2d46d373c3278f65db75"} Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.900025 4664 scope.go:117] "RemoveContainer" containerID="8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.900135 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j449d" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.949456 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.951036 4664 scope.go:117] "RemoveContainer" containerID="7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889" Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.956887 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j449d"] Oct 13 08:54:25 crc kubenswrapper[4664]: I1013 08:54:25.981841 4664 scope.go:117] "RemoveContainer" containerID="1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.039105 4664 scope.go:117] "RemoveContainer" containerID="8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31" Oct 13 08:54:26 crc kubenswrapper[4664]: E1013 08:54:26.039693 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31\": container with ID starting with 8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31 not found: ID does not exist" containerID="8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.039741 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31"} err="failed to get container status \"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31\": rpc error: code = NotFound desc = could not find container \"8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31\": container with ID starting with 8ce1f234b3b19b402528d92b84aa1902ba5cf4546b3e5e97ee44d64b4ead0d31 not found: ID does not exist" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.039760 4664 scope.go:117] "RemoveContainer" containerID="7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889" Oct 13 08:54:26 crc kubenswrapper[4664]: E1013 08:54:26.040369 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889\": container with ID starting with 7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889 not found: ID does not exist" containerID="7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.040501 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889"} err="failed to get container status \"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889\": rpc error: code = NotFound desc = could not find container \"7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889\": container with ID starting with 7786cfa1bd1743547af69668456a09d532cb62399b20bebc84b910ada0105889 not found: ID does not exist" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.040705 4664 scope.go:117] "RemoveContainer" containerID="1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943" Oct 13 08:54:26 crc kubenswrapper[4664]: E1013 08:54:26.041318 4664 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943\": container with ID starting with 1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943 not found: ID does not exist" containerID="1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943" Oct 13 08:54:26 crc kubenswrapper[4664]: I1013 08:54:26.041344 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943"} err="failed to get container status \"1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943\": rpc error: code = NotFound desc = could not find container \"1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943\": container with ID starting with 1eb0e9f34a24db46b1e93d3ddd8b3ee0b5d8c901f30755b66f3a379511d4b943 not found: ID does not exist" Oct 13 08:54:27 crc kubenswrapper[4664]: I1013 08:54:27.059879 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" path="/var/lib/kubelet/pods/1cc79530-fb40-4e1a-9856-a38fcb524779/volumes" Oct 13 08:54:34 crc kubenswrapper[4664]: I1013 08:54:34.047863 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:54:34 crc kubenswrapper[4664]: E1013 08:54:34.049531 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:54:48 crc kubenswrapper[4664]: I1013 08:54:48.047961 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:54:48 crc kubenswrapper[4664]: E1013 08:54:48.048948 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:55:02 crc kubenswrapper[4664]: I1013 08:55:02.047102 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:55:02 crc kubenswrapper[4664]: E1013 08:55:02.048375 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:55:16 crc kubenswrapper[4664]: I1013 08:55:16.047732 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:55:16 crc kubenswrapper[4664]: E1013 08:55:16.049078 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 08:55:29 crc kubenswrapper[4664]: I1013 08:55:29.048056 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:55:29 crc kubenswrapper[4664]: I1013 08:55:29.601945 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118"} Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.818449 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.822198 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="extract-content" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.824935 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="extract-content" Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.825034 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="extract-utilities" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825046 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="extract-utilities" Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.825061 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825067 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.825078 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="extract-content" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825084 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="extract-content" Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.825115 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825121 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: E1013 08:55:56.825138 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="extract-utilities" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825146 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="extract-utilities" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825529 4664 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1cc79530-fb40-4e1a-9856-a38fcb524779" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.825548 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3390e338-8414-471b-9a53-4806300cca8e" containerName="registry-server" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.826967 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.834374 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.943475 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.943556 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9qcm\" (UniqueName: \"kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:56 crc kubenswrapper[4664]: I1013 08:55:56.943589 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.046995 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.047194 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9qcm\" (UniqueName: \"kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.047230 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.047558 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.047675 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.077721 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9qcm\" (UniqueName: \"kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm\") pod \"community-operators-4fbfp\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.148758 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:55:57 crc kubenswrapper[4664]: I1013 08:55:57.993827 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:55:59 crc kubenswrapper[4664]: I1013 08:55:59.023328 4664 generic.go:334] "Generic (PLEG): container finished" podID="89bc2ac6-f510-4032-982d-810735ebf053" containerID="bec892c70bdd89744e568975c5b01f981997a7daf4dd46dba027c17ccf0a7076" exitCode=0 Oct 13 08:55:59 crc kubenswrapper[4664]: I1013 08:55:59.023937 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerDied","Data":"bec892c70bdd89744e568975c5b01f981997a7daf4dd46dba027c17ccf0a7076"} Oct 13 08:55:59 crc kubenswrapper[4664]: I1013 08:55:59.023966 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerStarted","Data":"a680732d458254ca6439a8435bca6fbfd887c891bfd1d681e49813f289e64177"} Oct 13 08:56:01 crc kubenswrapper[4664]: I1013 08:56:01.057491 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerStarted","Data":"085498fe4234cdcea85d4e5e986fe12bea372dc5b28fe4aff6fc890427ecb102"} Oct 13 08:56:02 crc kubenswrapper[4664]: I1013 08:56:02.067670 4664 generic.go:334] "Generic (PLEG): container finished" podID="89bc2ac6-f510-4032-982d-810735ebf053" containerID="085498fe4234cdcea85d4e5e986fe12bea372dc5b28fe4aff6fc890427ecb102" exitCode=0 Oct 13 08:56:02 crc kubenswrapper[4664]: I1013 08:56:02.067763 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerDied","Data":"085498fe4234cdcea85d4e5e986fe12bea372dc5b28fe4aff6fc890427ecb102"} Oct 13 08:56:03 crc kubenswrapper[4664]: I1013 08:56:03.079463 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerStarted","Data":"e8e3f40655d958d724b1c0908d060c469111e0eebd93b162b51d8a807b093367"} Oct 13 08:56:03 crc kubenswrapper[4664]: I1013 08:56:03.108522 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4fbfp" podStartSLOduration=3.572172555 podStartE2EDuration="7.107588087s" podCreationTimestamp="2025-10-13 08:55:56 +0000 UTC" firstStartedPulling="2025-10-13 
08:55:59.029198964 +0000 UTC m=+7766.716644176" lastFinishedPulling="2025-10-13 08:56:02.564614516 +0000 UTC m=+7770.252059708" observedRunningTime="2025-10-13 08:56:03.103338663 +0000 UTC m=+7770.790783865" watchObservedRunningTime="2025-10-13 08:56:03.107588087 +0000 UTC m=+7770.795033279" Oct 13 08:56:07 crc kubenswrapper[4664]: I1013 08:56:07.150735 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:07 crc kubenswrapper[4664]: I1013 08:56:07.151319 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:08 crc kubenswrapper[4664]: I1013 08:56:08.202564 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4fbfp" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="registry-server" probeResult="failure" output=< Oct 13 08:56:08 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:56:08 crc kubenswrapper[4664]: > Oct 13 08:56:17 crc kubenswrapper[4664]: I1013 08:56:17.208145 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:17 crc kubenswrapper[4664]: I1013 08:56:17.270190 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:17 crc kubenswrapper[4664]: I1013 08:56:17.454477 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:56:18 crc kubenswrapper[4664]: I1013 08:56:18.271068 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4fbfp" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="registry-server" containerID="cri-o://e8e3f40655d958d724b1c0908d060c469111e0eebd93b162b51d8a807b093367" gracePeriod=2 Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.283848 4664 generic.go:334] "Generic (PLEG): container finished" podID="89bc2ac6-f510-4032-982d-810735ebf053" containerID="e8e3f40655d958d724b1c0908d060c469111e0eebd93b162b51d8a807b093367" exitCode=0 Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.283935 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerDied","Data":"e8e3f40655d958d724b1c0908d060c469111e0eebd93b162b51d8a807b093367"} Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.747583 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.918912 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities\") pod \"89bc2ac6-f510-4032-982d-810735ebf053\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.919004 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9qcm\" (UniqueName: \"kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm\") pod \"89bc2ac6-f510-4032-982d-810735ebf053\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.919136 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content\") pod \"89bc2ac6-f510-4032-982d-810735ebf053\" (UID: \"89bc2ac6-f510-4032-982d-810735ebf053\") " Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.920192 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities" (OuterVolumeSpecName: "utilities") pod "89bc2ac6-f510-4032-982d-810735ebf053" (UID: "89bc2ac6-f510-4032-982d-810735ebf053"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.934099 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm" (OuterVolumeSpecName: "kube-api-access-s9qcm") pod "89bc2ac6-f510-4032-982d-810735ebf053" (UID: "89bc2ac6-f510-4032-982d-810735ebf053"). InnerVolumeSpecName "kube-api-access-s9qcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:56:19 crc kubenswrapper[4664]: I1013 08:56:19.954204 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89bc2ac6-f510-4032-982d-810735ebf053" (UID: "89bc2ac6-f510-4032-982d-810735ebf053"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.021839 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.021880 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9qcm\" (UniqueName: \"kubernetes.io/projected/89bc2ac6-f510-4032-982d-810735ebf053-kube-api-access-s9qcm\") on node \"crc\" DevicePath \"\"" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.021895 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89bc2ac6-f510-4032-982d-810735ebf053-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.303366 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fbfp" event={"ID":"89bc2ac6-f510-4032-982d-810735ebf053","Type":"ContainerDied","Data":"a680732d458254ca6439a8435bca6fbfd887c891bfd1d681e49813f289e64177"} Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.303461 4664 scope.go:117] "RemoveContainer" containerID="e8e3f40655d958d724b1c0908d060c469111e0eebd93b162b51d8a807b093367" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.303480 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fbfp" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.346218 4664 scope.go:117] "RemoveContainer" containerID="085498fe4234cdcea85d4e5e986fe12bea372dc5b28fe4aff6fc890427ecb102" Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.354972 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.363547 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4fbfp"] Oct 13 08:56:20 crc kubenswrapper[4664]: I1013 08:56:20.373598 4664 scope.go:117] "RemoveContainer" containerID="bec892c70bdd89744e568975c5b01f981997a7daf4dd46dba027c17ccf0a7076" Oct 13 08:56:21 crc kubenswrapper[4664]: I1013 08:56:21.064152 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89bc2ac6-f510-4032-982d-810735ebf053" path="/var/lib/kubelet/pods/89bc2ac6-f510-4032-982d-810735ebf053/volumes" Oct 13 08:57:58 crc kubenswrapper[4664]: I1013 08:57:58.813575 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:57:58 crc kubenswrapper[4664]: I1013 08:57:58.815281 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:58:28 crc kubenswrapper[4664]: I1013 08:58:28.812383 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:58:28 crc kubenswrapper[4664]: I1013 08:58:28.812967 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:58:58 crc kubenswrapper[4664]: I1013 08:58:58.811819 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 08:58:58 crc kubenswrapper[4664]: I1013 08:58:58.812253 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 08:58:58 crc kubenswrapper[4664]: I1013 08:58:58.812294 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 08:58:58 crc kubenswrapper[4664]: I1013 08:58:58.813236 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 08:58:58 crc kubenswrapper[4664]: I1013 08:58:58.813342 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118" gracePeriod=600 Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.085422 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118" exitCode=0 Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.085477 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118"} Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.085515 4664 scope.go:117] "RemoveContainer" containerID="5ecaf3b9d4e4fa02a162a2a77e4ff97b6424a89c6345873012334307d42e1e97" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.488375 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:58:59 crc kubenswrapper[4664]: E1013 08:58:59.489016 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="registry-server" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.489042 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2ac6-f510-4032-982d-810735ebf053" 
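
Note: the machine-config-daemon liveness failures above arrive exactly 30 seconds apart (08:57:58, 08:58:28, 08:58:58), and the third consecutive refusal trips the restart, after which kubelet kills the container with gracePeriod=600. A probe block consistent with that behavior; the path, port, spacing, threshold, and grace period are read off the log, the rest is assumed:

  livenessProbe:
    httpGet:
      path: /health
      port: 8798            # refused at 127.0.0.1:8798, consistent with a hostNetwork daemon probed over localhost
    periodSeconds: 30       # failures are logged 30s apart
    failureThreshold: 3     # the restart fires on the third consecutive failure
  terminationGracePeriodSeconds: 600    # matches gracePeriod=600 in the kill entry
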
containerName="registry-server" Oct 13 08:58:59 crc kubenswrapper[4664]: E1013 08:58:59.489095 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="extract-content" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.489108 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="extract-content" Oct 13 08:58:59 crc kubenswrapper[4664]: E1013 08:58:59.489124 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="extract-utilities" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.489135 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="extract-utilities" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.489488 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bc2ac6-f510-4032-982d-810735ebf053" containerName="registry-server" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.491945 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.497631 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpxxd\" (UniqueName: \"kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.498055 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.498145 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.499172 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.600403 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.600488 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.600570 4664 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-mpxxd\" (UniqueName: \"kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.601544 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.601838 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.630249 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpxxd\" (UniqueName: \"kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd\") pod \"certified-operators-zvdnr\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:58:59 crc kubenswrapper[4664]: I1013 08:58:59.812746 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:00 crc kubenswrapper[4664]: I1013 08:59:00.104128 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426"} Oct 13 08:59:00 crc kubenswrapper[4664]: I1013 08:59:00.587262 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:59:00 crc kubenswrapper[4664]: W1013 08:59:00.613885 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod804effaf_2b86_4421_9b4d_b98ea737e3b5.slice/crio-d29b0e82e39a8d97b034c7e219a80ee483b773011d5ae30f646e92f6fff9ba5d WatchSource:0}: Error finding container d29b0e82e39a8d97b034c7e219a80ee483b773011d5ae30f646e92f6fff9ba5d: Status 404 returned error can't find the container with id d29b0e82e39a8d97b034c7e219a80ee483b773011d5ae30f646e92f6fff9ba5d Oct 13 08:59:01 crc kubenswrapper[4664]: I1013 08:59:01.122160 4664 generic.go:334] "Generic (PLEG): container finished" podID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerID="ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed" exitCode=0 Oct 13 08:59:01 crc kubenswrapper[4664]: I1013 08:59:01.122735 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerDied","Data":"ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed"} Oct 13 08:59:01 crc kubenswrapper[4664]: I1013 08:59:01.125253 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" 
event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerStarted","Data":"d29b0e82e39a8d97b034c7e219a80ee483b773011d5ae30f646e92f6fff9ba5d"} Oct 13 08:59:01 crc kubenswrapper[4664]: I1013 08:59:01.125635 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 08:59:03 crc kubenswrapper[4664]: I1013 08:59:03.147317 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerStarted","Data":"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018"} Oct 13 08:59:05 crc kubenswrapper[4664]: I1013 08:59:05.171263 4664 generic.go:334] "Generic (PLEG): container finished" podID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerID="d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018" exitCode=0 Oct 13 08:59:05 crc kubenswrapper[4664]: I1013 08:59:05.171349 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerDied","Data":"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018"} Oct 13 08:59:06 crc kubenswrapper[4664]: I1013 08:59:06.181925 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerStarted","Data":"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8"} Oct 13 08:59:06 crc kubenswrapper[4664]: I1013 08:59:06.208590 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zvdnr" podStartSLOduration=2.460401807 podStartE2EDuration="7.208571268s" podCreationTimestamp="2025-10-13 08:58:59 +0000 UTC" firstStartedPulling="2025-10-13 08:59:01.124009374 +0000 UTC m=+7948.811454556" lastFinishedPulling="2025-10-13 08:59:05.872178815 +0000 UTC m=+7953.559624017" observedRunningTime="2025-10-13 08:59:06.205041463 +0000 UTC m=+7953.892486655" watchObservedRunningTime="2025-10-13 08:59:06.208571268 +0000 UTC m=+7953.896016450" Oct 13 08:59:09 crc kubenswrapper[4664]: I1013 08:59:09.814240 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:09 crc kubenswrapper[4664]: I1013 08:59:09.815746 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:10 crc kubenswrapper[4664]: I1013 08:59:10.883135 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zvdnr" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" probeResult="failure" output=< Oct 13 08:59:10 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:59:10 crc kubenswrapper[4664]: > Oct 13 08:59:20 crc kubenswrapper[4664]: I1013 08:59:20.880790 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zvdnr" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" probeResult="failure" output=< Oct 13 08:59:20 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 08:59:20 crc kubenswrapper[4664]: > Oct 13 08:59:20 crc kubenswrapper[4664]: I1013 08:59:20.928077 4664 patch_prober.go:28] interesting pod/controller-manager-67d595f8b9-5ff2q 
container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 13 08:59:20 crc kubenswrapper[4664]: I1013 08:59:20.928174 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-67d595f8b9-5ff2q" podUID="5c9254a8-804c-462f-b06a-0016170cb46c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.52:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 13 08:59:27 crc kubenswrapper[4664]: E1013 08:59:27.404860 4664 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:52446->38.102.83.223:37357: write tcp 38.102.83.223:52446->38.102.83.223:37357: write: connection reset by peer Oct 13 08:59:29 crc kubenswrapper[4664]: I1013 08:59:29.895863 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:29 crc kubenswrapper[4664]: I1013 08:59:29.959140 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:30 crc kubenswrapper[4664]: I1013 08:59:30.678170 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:59:31 crc kubenswrapper[4664]: I1013 08:59:31.416366 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zvdnr" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" containerID="cri-o://cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8" gracePeriod=2 Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.180542 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.328259 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities\") pod \"804effaf-2b86-4421-9b4d-b98ea737e3b5\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.329145 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities" (OuterVolumeSpecName: "utilities") pod "804effaf-2b86-4421-9b4d-b98ea737e3b5" (UID: "804effaf-2b86-4421-9b4d-b98ea737e3b5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.329542 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content\") pod \"804effaf-2b86-4421-9b4d-b98ea737e3b5\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.329925 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpxxd\" (UniqueName: \"kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd\") pod \"804effaf-2b86-4421-9b4d-b98ea737e3b5\" (UID: \"804effaf-2b86-4421-9b4d-b98ea737e3b5\") " Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.332125 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.343789 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd" (OuterVolumeSpecName: "kube-api-access-mpxxd") pod "804effaf-2b86-4421-9b4d-b98ea737e3b5" (UID: "804effaf-2b86-4421-9b4d-b98ea737e3b5"). InnerVolumeSpecName "kube-api-access-mpxxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.418971 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "804effaf-2b86-4421-9b4d-b98ea737e3b5" (UID: "804effaf-2b86-4421-9b4d-b98ea737e3b5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.432750 4664 generic.go:334] "Generic (PLEG): container finished" podID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerID="cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8" exitCode=0 Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.432812 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerDied","Data":"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8"} Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.432903 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvdnr" event={"ID":"804effaf-2b86-4421-9b4d-b98ea737e3b5","Type":"ContainerDied","Data":"d29b0e82e39a8d97b034c7e219a80ee483b773011d5ae30f646e92f6fff9ba5d"} Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.432838 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zvdnr" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.432934 4664 scope.go:117] "RemoveContainer" containerID="cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.434371 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804effaf-2b86-4421-9b4d-b98ea737e3b5-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.434432 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpxxd\" (UniqueName: \"kubernetes.io/projected/804effaf-2b86-4421-9b4d-b98ea737e3b5-kube-api-access-mpxxd\") on node \"crc\" DevicePath \"\"" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.494186 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.506929 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zvdnr"] Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.517240 4664 scope.go:117] "RemoveContainer" containerID="d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.550753 4664 scope.go:117] "RemoveContainer" containerID="ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.590106 4664 scope.go:117] "RemoveContainer" containerID="cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8" Oct 13 08:59:32 crc kubenswrapper[4664]: E1013 08:59:32.594887 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8\": container with ID starting with cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8 not found: ID does not exist" containerID="cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.595463 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8"} err="failed to get container status \"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8\": rpc error: code = NotFound desc = could not find container \"cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8\": container with ID starting with cb7db1bcef1016b294dc8e1db14f75d83acc8260fb4964a62d8987fd75000ee8 not found: ID does not exist" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.595511 4664 scope.go:117] "RemoveContainer" containerID="d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018" Oct 13 08:59:32 crc kubenswrapper[4664]: E1013 08:59:32.596030 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018\": container with ID starting with d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018 not found: ID does not exist" containerID="d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.596088 4664 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018"} err="failed to get container status \"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018\": rpc error: code = NotFound desc = could not find container \"d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018\": container with ID starting with d75f6e1984b4dbffc6b082d7832919e89ff2689c958357cf9a3d496b3c02e018 not found: ID does not exist" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.596128 4664 scope.go:117] "RemoveContainer" containerID="ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed" Oct 13 08:59:32 crc kubenswrapper[4664]: E1013 08:59:32.596571 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed\": container with ID starting with ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed not found: ID does not exist" containerID="ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed" Oct 13 08:59:32 crc kubenswrapper[4664]: I1013 08:59:32.596622 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed"} err="failed to get container status \"ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed\": rpc error: code = NotFound desc = could not find container \"ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed\": container with ID starting with ed7d4a63fdf43fae1918c015e8910e1d019ed263a59e3967092c45a0ab7f73ed not found: ID does not exist" Oct 13 08:59:33 crc kubenswrapper[4664]: I1013 08:59:33.058536 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" path="/var/lib/kubelet/pods/804effaf-2b86-4421-9b4d-b98ea737e3b5/volumes" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.299455 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz"] Oct 13 09:00:00 crc kubenswrapper[4664]: E1013 09:00:00.300432 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.300448 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" Oct 13 09:00:00 crc kubenswrapper[4664]: E1013 09:00:00.300470 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="extract-content" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.300479 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="extract-content" Oct 13 09:00:00 crc kubenswrapper[4664]: E1013 09:00:00.300499 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="extract-utilities" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.300509 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="extract-utilities" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.300789 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="804effaf-2b86-4421-9b4d-b98ea737e3b5" containerName="registry-server" Oct 13 09:00:00 
crc kubenswrapper[4664]: I1013 09:00:00.301612 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.318974 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz"] Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.335108 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsk94\" (UniqueName: \"kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.335174 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.335592 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.348534 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.392952 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.437170 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.437301 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsk94\" (UniqueName: \"kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.437349 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.438055 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.456731 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.457305 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsk94\" (UniqueName: \"kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94\") pod \"collect-profiles-29339100-h48kz\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:00 crc kubenswrapper[4664]: I1013 09:00:00.634625 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:01 crc kubenswrapper[4664]: I1013 09:00:01.176267 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz"] Oct 13 09:00:01 crc kubenswrapper[4664]: W1013 09:00:01.183911 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod567af6a1_6d36_4ef4_bb12_13f9c8ae985f.slice/crio-df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e WatchSource:0}: Error finding container df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e: Status 404 returned error can't find the container with id df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e Oct 13 09:00:01 crc kubenswrapper[4664]: I1013 09:00:01.742014 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" event={"ID":"567af6a1-6d36-4ef4-bb12-13f9c8ae985f","Type":"ContainerStarted","Data":"490c33bb24dcad9205355291914abbf3343c1a4a3be3226edfe6e43094ab7ce2"} Oct 13 09:00:01 crc kubenswrapper[4664]: I1013 09:00:01.742319 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" event={"ID":"567af6a1-6d36-4ef4-bb12-13f9c8ae985f","Type":"ContainerStarted","Data":"df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e"} Oct 13 09:00:01 crc kubenswrapper[4664]: I1013 09:00:01.763822 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" podStartSLOduration=1.7637812419999999 podStartE2EDuration="1.763781242s" podCreationTimestamp="2025-10-13 09:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 09:00:01.75736427 +0000 UTC m=+8009.444809492" watchObservedRunningTime="2025-10-13 09:00:01.763781242 +0000 UTC m=+8009.451226434" Oct 13 09:00:02 crc kubenswrapper[4664]: I1013 09:00:02.790199 4664 generic.go:334] "Generic (PLEG): container finished" podID="567af6a1-6d36-4ef4-bb12-13f9c8ae985f" 
containerID="490c33bb24dcad9205355291914abbf3343c1a4a3be3226edfe6e43094ab7ce2" exitCode=0 Oct 13 09:00:02 crc kubenswrapper[4664]: I1013 09:00:02.790418 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" event={"ID":"567af6a1-6d36-4ef4-bb12-13f9c8ae985f","Type":"ContainerDied","Data":"490c33bb24dcad9205355291914abbf3343c1a4a3be3226edfe6e43094ab7ce2"} Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.150420 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.290673 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume\") pod \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.290974 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume\") pod \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.291094 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsk94\" (UniqueName: \"kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94\") pod \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\" (UID: \"567af6a1-6d36-4ef4-bb12-13f9c8ae985f\") " Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.291599 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume" (OuterVolumeSpecName: "config-volume") pod "567af6a1-6d36-4ef4-bb12-13f9c8ae985f" (UID: "567af6a1-6d36-4ef4-bb12-13f9c8ae985f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.296072 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94" (OuterVolumeSpecName: "kube-api-access-vsk94") pod "567af6a1-6d36-4ef4-bb12-13f9c8ae985f" (UID: "567af6a1-6d36-4ef4-bb12-13f9c8ae985f"). InnerVolumeSpecName "kube-api-access-vsk94". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.296308 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "567af6a1-6d36-4ef4-bb12-13f9c8ae985f" (UID: "567af6a1-6d36-4ef4-bb12-13f9c8ae985f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.393685 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.393721 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.393732 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsk94\" (UniqueName: \"kubernetes.io/projected/567af6a1-6d36-4ef4-bb12-13f9c8ae985f-kube-api-access-vsk94\") on node \"crc\" DevicePath \"\"" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.815687 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" event={"ID":"567af6a1-6d36-4ef4-bb12-13f9c8ae985f","Type":"ContainerDied","Data":"df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e"} Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.815747 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339100-h48kz" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.815997 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df8e462eb3e2a0c57b0beb866c42261ec2c16cbce8c3ac7919d007445365345e" Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.874188 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"] Oct 13 09:00:04 crc kubenswrapper[4664]: I1013 09:00:04.884923 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339055-xq5qm"] Oct 13 09:00:05 crc kubenswrapper[4664]: I1013 09:00:05.059983 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d4c6351-6377-499a-88c9-d9e24c5dcb82" path="/var/lib/kubelet/pods/0d4c6351-6377-499a-88c9-d9e24c5dcb82/volumes" Oct 13 09:00:44 crc kubenswrapper[4664]: I1013 09:00:44.382516 4664 scope.go:117] "RemoveContainer" containerID="a5b42ea02ac8b153ff2a58b5d0e458a558e5398623bcd42d3c5150091c6d742e" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.158328 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29339101-zkldc"] Oct 13 09:01:00 crc kubenswrapper[4664]: E1013 09:01:00.159107 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="567af6a1-6d36-4ef4-bb12-13f9c8ae985f" containerName="collect-profiles" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.159121 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="567af6a1-6d36-4ef4-bb12-13f9c8ae985f" containerName="collect-profiles" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.159342 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="567af6a1-6d36-4ef4-bb12-13f9c8ae985f" containerName="collect-profiles" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.162480 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.192707 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29339101-zkldc"] Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.314306 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.314641 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.315030 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.315313 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpm9f\" (UniqueName: \"kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.417580 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.417670 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.417736 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpm9f\" (UniqueName: \"kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.417798 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.427699 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.429205 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.437783 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.460553 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpm9f\" (UniqueName: \"kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f\") pod \"keystone-cron-29339101-zkldc\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:00 crc kubenswrapper[4664]: I1013 09:01:00.490194 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:01 crc kubenswrapper[4664]: I1013 09:01:01.140445 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29339101-zkldc"] Oct 13 09:01:01 crc kubenswrapper[4664]: I1013 09:01:01.396877 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339101-zkldc" event={"ID":"eca6f5d0-1798-4440-a991-f02266967bb0","Type":"ContainerStarted","Data":"c0582950807b047981e3b3df2839db4c919b571dc8dd0c4d817a49aa4a702ad6"} Oct 13 09:01:01 crc kubenswrapper[4664]: I1013 09:01:01.396934 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339101-zkldc" event={"ID":"eca6f5d0-1798-4440-a991-f02266967bb0","Type":"ContainerStarted","Data":"fa23a2369e6af65a4881d05fcc6ed320464caf3ea78343e8cae735827f79bc61"} Oct 13 09:01:01 crc kubenswrapper[4664]: I1013 09:01:01.422720 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29339101-zkldc" podStartSLOduration=1.42270082 podStartE2EDuration="1.42270082s" podCreationTimestamp="2025-10-13 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 09:01:01.415416615 +0000 UTC m=+8069.102861827" watchObservedRunningTime="2025-10-13 09:01:01.42270082 +0000 UTC m=+8069.110146012" Oct 13 09:01:06 crc kubenswrapper[4664]: I1013 09:01:06.447302 4664 generic.go:334] "Generic (PLEG): container finished" podID="eca6f5d0-1798-4440-a991-f02266967bb0" containerID="c0582950807b047981e3b3df2839db4c919b571dc8dd0c4d817a49aa4a702ad6" exitCode=0 Oct 13 09:01:06 crc kubenswrapper[4664]: I1013 09:01:06.447502 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339101-zkldc" event={"ID":"eca6f5d0-1798-4440-a991-f02266967bb0","Type":"ContainerDied","Data":"c0582950807b047981e3b3df2839db4c919b571dc8dd0c4d817a49aa4a702ad6"} Oct 13 09:01:07 crc kubenswrapper[4664]: 
I1013 09:01:07.949604 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.056711 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle\") pod \"eca6f5d0-1798-4440-a991-f02266967bb0\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.056813 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys\") pod \"eca6f5d0-1798-4440-a991-f02266967bb0\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.056953 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpm9f\" (UniqueName: \"kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f\") pod \"eca6f5d0-1798-4440-a991-f02266967bb0\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.057001 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data\") pod \"eca6f5d0-1798-4440-a991-f02266967bb0\" (UID: \"eca6f5d0-1798-4440-a991-f02266967bb0\") " Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.063709 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f" (OuterVolumeSpecName: "kube-api-access-dpm9f") pod "eca6f5d0-1798-4440-a991-f02266967bb0" (UID: "eca6f5d0-1798-4440-a991-f02266967bb0"). InnerVolumeSpecName "kube-api-access-dpm9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.081912 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "eca6f5d0-1798-4440-a991-f02266967bb0" (UID: "eca6f5d0-1798-4440-a991-f02266967bb0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.106005 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eca6f5d0-1798-4440-a991-f02266967bb0" (UID: "eca6f5d0-1798-4440-a991-f02266967bb0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.148210 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data" (OuterVolumeSpecName: "config-data") pod "eca6f5d0-1798-4440-a991-f02266967bb0" (UID: "eca6f5d0-1798-4440-a991-f02266967bb0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.161303 4664 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.161372 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpm9f\" (UniqueName: \"kubernetes.io/projected/eca6f5d0-1798-4440-a991-f02266967bb0-kube-api-access-dpm9f\") on node \"crc\" DevicePath \"\"" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.161393 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.161410 4664 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca6f5d0-1798-4440-a991-f02266967bb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.489110 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29339101-zkldc" event={"ID":"eca6f5d0-1798-4440-a991-f02266967bb0","Type":"ContainerDied","Data":"fa23a2369e6af65a4881d05fcc6ed320464caf3ea78343e8cae735827f79bc61"} Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.489158 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa23a2369e6af65a4881d05fcc6ed320464caf3ea78343e8cae735827f79bc61" Oct 13 09:01:08 crc kubenswrapper[4664]: I1013 09:01:08.489225 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29339101-zkldc" Oct 13 09:01:28 crc kubenswrapper[4664]: I1013 09:01:28.811894 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:01:28 crc kubenswrapper[4664]: I1013 09:01:28.812522 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:01:58 crc kubenswrapper[4664]: I1013 09:01:58.812302 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:01:58 crc kubenswrapper[4664]: I1013 09:01:58.813070 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:02:28 crc kubenswrapper[4664]: I1013 09:02:28.812555 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:02:28 crc kubenswrapper[4664]: I1013 09:02:28.813354 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:02:28 crc kubenswrapper[4664]: I1013 09:02:28.813434 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 09:02:28 crc kubenswrapper[4664]: I1013 09:02:28.817105 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 09:02:28 crc kubenswrapper[4664]: I1013 09:02:28.817247 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" gracePeriod=600 Oct 13 09:02:28 crc kubenswrapper[4664]: E1013 09:02:28.950553 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:02:29 crc kubenswrapper[4664]: I1013 09:02:29.379228 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" exitCode=0 Oct 13 09:02:29 crc kubenswrapper[4664]: I1013 09:02:29.379306 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426"} Oct 13 09:02:29 crc kubenswrapper[4664]: I1013 09:02:29.379644 4664 scope.go:117] "RemoveContainer" containerID="d31947d1482d089981ff433280849da51a3bf3253ba678d4ac07eb297c661118" Oct 13 09:02:29 crc kubenswrapper[4664]: I1013 09:02:29.381032 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:02:29 crc kubenswrapper[4664]: E1013 09:02:29.381584 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:02:45 crc kubenswrapper[4664]: I1013 
09:02:45.046847 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:02:45 crc kubenswrapper[4664]: E1013 09:02:45.047616 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:02:59 crc kubenswrapper[4664]: I1013 09:02:59.047142 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:02:59 crc kubenswrapper[4664]: E1013 09:02:59.047901 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:03:10 crc kubenswrapper[4664]: I1013 09:03:10.047463 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:03:10 crc kubenswrapper[4664]: E1013 09:03:10.048595 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:03:22 crc kubenswrapper[4664]: I1013 09:03:22.046533 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:03:22 crc kubenswrapper[4664]: E1013 09:03:22.047090 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:03:35 crc kubenswrapper[4664]: I1013 09:03:35.047557 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:03:35 crc kubenswrapper[4664]: E1013 09:03:35.051634 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:03:48 crc kubenswrapper[4664]: I1013 09:03:48.046529 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:03:48 crc kubenswrapper[4664]: E1013 09:03:48.047240 
4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:03:59 crc kubenswrapper[4664]: I1013 09:03:59.047834 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:03:59 crc kubenswrapper[4664]: E1013 09:03:59.049192 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:04:12 crc kubenswrapper[4664]: I1013 09:04:12.048657 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:04:12 crc kubenswrapper[4664]: E1013 09:04:12.049393 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:04:24 crc kubenswrapper[4664]: I1013 09:04:24.048036 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:04:24 crc kubenswrapper[4664]: E1013 09:04:24.048632 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.047099 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:04:36 crc kubenswrapper[4664]: E1013 09:04:36.047659 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.665568 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:04:36 crc kubenswrapper[4664]: E1013 09:04:36.666552 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eca6f5d0-1798-4440-a991-f02266967bb0" containerName="keystone-cron" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.666679 
4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="eca6f5d0-1798-4440-a991-f02266967bb0" containerName="keystone-cron" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.667249 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="eca6f5d0-1798-4440-a991-f02266967bb0" containerName="keystone-cron" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.669792 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.675969 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.755732 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.755918 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.756054 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqvwb\" (UniqueName: \"kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.857783 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.857983 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.858089 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqvwb\" (UniqueName: \"kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.858476 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.859151 4664 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:36 crc kubenswrapper[4664]: I1013 09:04:36.881860 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqvwb\" (UniqueName: \"kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb\") pod \"redhat-operators-md5fn\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.002702 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.539178 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.776752 4664 generic.go:334] "Generic (PLEG): container finished" podID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerID="ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021" exitCode=0 Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.776808 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerDied","Data":"ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021"} Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.776837 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerStarted","Data":"0f329ec9b8f2574c3ad267ac9469410a68e902e4bbeee23c505b3a8191ab1222"} Oct 13 09:04:37 crc kubenswrapper[4664]: I1013 09:04:37.779206 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 09:04:38 crc kubenswrapper[4664]: I1013 09:04:38.787537 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerStarted","Data":"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5"} Oct 13 09:04:42 crc kubenswrapper[4664]: I1013 09:04:42.844760 4664 generic.go:334] "Generic (PLEG): container finished" podID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerID="21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5" exitCode=0 Oct 13 09:04:42 crc kubenswrapper[4664]: I1013 09:04:42.845293 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerDied","Data":"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5"} Oct 13 09:04:43 crc kubenswrapper[4664]: I1013 09:04:43.864504 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerStarted","Data":"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212"} Oct 13 09:04:43 crc kubenswrapper[4664]: I1013 09:04:43.901226 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-md5fn" podStartSLOduration=2.300900077 
podStartE2EDuration="7.90119741s" podCreationTimestamp="2025-10-13 09:04:36 +0000 UTC" firstStartedPulling="2025-10-13 09:04:37.778837737 +0000 UTC m=+8285.466282939" lastFinishedPulling="2025-10-13 09:04:43.37913507 +0000 UTC m=+8291.066580272" observedRunningTime="2025-10-13 09:04:43.895560258 +0000 UTC m=+8291.583005470" watchObservedRunningTime="2025-10-13 09:04:43.90119741 +0000 UTC m=+8291.588642602" Oct 13 09:04:47 crc kubenswrapper[4664]: I1013 09:04:47.003815 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:47 crc kubenswrapper[4664]: I1013 09:04:47.004201 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:04:48 crc kubenswrapper[4664]: I1013 09:04:48.047888 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-md5fn" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" probeResult="failure" output=< Oct 13 09:04:48 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:04:48 crc kubenswrapper[4664]: > Oct 13 09:04:51 crc kubenswrapper[4664]: I1013 09:04:51.047088 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:04:51 crc kubenswrapper[4664]: E1013 09:04:51.047894 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:04:58 crc kubenswrapper[4664]: I1013 09:04:58.068968 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-md5fn" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" probeResult="failure" output=< Oct 13 09:04:58 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:04:58 crc kubenswrapper[4664]: > Oct 13 09:05:03 crc kubenswrapper[4664]: I1013 09:05:03.060137 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:05:03 crc kubenswrapper[4664]: E1013 09:05:03.063201 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.828638 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.831549 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.847155 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.931101 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7qnr\" (UniqueName: \"kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.931259 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:05 crc kubenswrapper[4664]: I1013 09:05:05.931367 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.033405 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7qnr\" (UniqueName: \"kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.033487 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.033547 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.034028 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.034536 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.078062 4664 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-x7qnr\" (UniqueName: \"kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr\") pod \"redhat-marketplace-wnzjk\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:06 crc kubenswrapper[4664]: I1013 09:05:06.152960 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:07 crc kubenswrapper[4664]: I1013 09:05:07.199645 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:08 crc kubenswrapper[4664]: I1013 09:05:08.052376 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-md5fn" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" probeResult="failure" output=< Oct 13 09:05:08 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:05:08 crc kubenswrapper[4664]: > Oct 13 09:05:08 crc kubenswrapper[4664]: I1013 09:05:08.128718 4664 generic.go:334] "Generic (PLEG): container finished" podID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerID="31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2" exitCode=0 Oct 13 09:05:08 crc kubenswrapper[4664]: I1013 09:05:08.128843 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerDied","Data":"31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2"} Oct 13 09:05:08 crc kubenswrapper[4664]: I1013 09:05:08.128876 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerStarted","Data":"2b696e6fb9a6633ef7392ef7d18a22b219cdf58c36ef6c73767961bf4bfaf9be"} Oct 13 09:05:09 crc kubenswrapper[4664]: I1013 09:05:09.139962 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerStarted","Data":"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e"} Oct 13 09:05:10 crc kubenswrapper[4664]: I1013 09:05:10.150968 4664 generic.go:334] "Generic (PLEG): container finished" podID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerID="59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e" exitCode=0 Oct 13 09:05:10 crc kubenswrapper[4664]: I1013 09:05:10.151267 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerDied","Data":"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e"} Oct 13 09:05:12 crc kubenswrapper[4664]: I1013 09:05:12.172060 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerStarted","Data":"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252"} Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.154083 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.154741 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.218241 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.247628 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wnzjk" podStartSLOduration=7.739989649 podStartE2EDuration="11.247600866s" podCreationTimestamp="2025-10-13 09:05:05 +0000 UTC" firstStartedPulling="2025-10-13 09:05:08.133721379 +0000 UTC m=+8315.821166571" lastFinishedPulling="2025-10-13 09:05:11.641332596 +0000 UTC m=+8319.328777788" observedRunningTime="2025-10-13 09:05:12.192361515 +0000 UTC m=+8319.879806707" watchObservedRunningTime="2025-10-13 09:05:16.247600866 +0000 UTC m=+8323.935046098" Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.269529 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:16 crc kubenswrapper[4664]: I1013 09:05:16.463491 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:17 crc kubenswrapper[4664]: I1013 09:05:17.049009 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:05:17 crc kubenswrapper[4664]: E1013 09:05:17.049255 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:05:17 crc kubenswrapper[4664]: I1013 09:05:17.060536 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:05:17 crc kubenswrapper[4664]: I1013 09:05:17.117547 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.228674 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wnzjk" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="registry-server" containerID="cri-o://f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252" gracePeriod=2 Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.819820 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.869745 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.870006 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-md5fn" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" containerID="cri-o://6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212" gracePeriod=2 Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.873984 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7qnr\" (UniqueName: \"kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr\") pod \"08a9405b-9db9-4ae0-b25c-21ac477f565b\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.874073 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities\") pod \"08a9405b-9db9-4ae0-b25c-21ac477f565b\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.874199 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content\") pod \"08a9405b-9db9-4ae0-b25c-21ac477f565b\" (UID: \"08a9405b-9db9-4ae0-b25c-21ac477f565b\") " Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.876519 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities" (OuterVolumeSpecName: "utilities") pod "08a9405b-9db9-4ae0-b25c-21ac477f565b" (UID: "08a9405b-9db9-4ae0-b25c-21ac477f565b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.891477 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08a9405b-9db9-4ae0-b25c-21ac477f565b" (UID: "08a9405b-9db9-4ae0-b25c-21ac477f565b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.899025 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr" (OuterVolumeSpecName: "kube-api-access-x7qnr") pod "08a9405b-9db9-4ae0-b25c-21ac477f565b" (UID: "08a9405b-9db9-4ae0-b25c-21ac477f565b"). InnerVolumeSpecName "kube-api-access-x7qnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.976578 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.976619 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7qnr\" (UniqueName: \"kubernetes.io/projected/08a9405b-9db9-4ae0-b25c-21ac477f565b-kube-api-access-x7qnr\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:18 crc kubenswrapper[4664]: I1013 09:05:18.976632 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a9405b-9db9-4ae0-b25c-21ac477f565b-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.216593 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.238569 4664 generic.go:334] "Generic (PLEG): container finished" podID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerID="6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212" exitCode=0 Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.239211 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerDied","Data":"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212"} Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.239292 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md5fn" event={"ID":"3a011d78-383e-4c8a-89fd-3bdf7fe087f3","Type":"ContainerDied","Data":"0f329ec9b8f2574c3ad267ac9469410a68e902e4bbeee23c505b3a8191ab1222"} Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.239367 4664 scope.go:117] "RemoveContainer" containerID="6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.239524 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md5fn" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.245585 4664 generic.go:334] "Generic (PLEG): container finished" podID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerID="f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252" exitCode=0 Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.245676 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerDied","Data":"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252"} Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.245740 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wnzjk" event={"ID":"08a9405b-9db9-4ae0-b25c-21ac477f565b","Type":"ContainerDied","Data":"2b696e6fb9a6633ef7392ef7d18a22b219cdf58c36ef6c73767961bf4bfaf9be"} Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.246220 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wnzjk" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.284906 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqvwb\" (UniqueName: \"kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb\") pod \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.285085 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities\") pod \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.285173 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content\") pod \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\" (UID: \"3a011d78-383e-4c8a-89fd-3bdf7fe087f3\") " Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.286360 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities" (OuterVolumeSpecName: "utilities") pod "3a011d78-383e-4c8a-89fd-3bdf7fe087f3" (UID: "3a011d78-383e-4c8a-89fd-3bdf7fe087f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.286966 4664 scope.go:117] "RemoveContainer" containerID="21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.288929 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.290122 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb" (OuterVolumeSpecName: "kube-api-access-zqvwb") pod "3a011d78-383e-4c8a-89fd-3bdf7fe087f3" (UID: "3a011d78-383e-4c8a-89fd-3bdf7fe087f3"). InnerVolumeSpecName "kube-api-access-zqvwb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.298622 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wnzjk"] Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.327554 4664 scope.go:117] "RemoveContainer" containerID="ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.356079 4664 scope.go:117] "RemoveContainer" containerID="6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.361565 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212\": container with ID starting with 6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212 not found: ID does not exist" containerID="6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.361623 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212"} err="failed to get container status \"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212\": rpc error: code = NotFound desc = could not find container \"6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212\": container with ID starting with 6e768df65375f646aed85b536f35f0418e4f817a92969bcafc66a551866bb212 not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.361669 4664 scope.go:117] "RemoveContainer" containerID="21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.363553 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5\": container with ID starting with 21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5 not found: ID does not exist" containerID="21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.363632 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5"} err="failed to get container status \"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5\": rpc error: code = NotFound desc = could not find container \"21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5\": container with ID starting with 21e14cff7025dfeeb2e76bf0a0eae499503e10922e7069bff5a9f927cacd82d5 not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.363703 4664 scope.go:117] "RemoveContainer" containerID="ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.364106 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021\": container with ID starting with ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021 not found: ID does not exist" containerID="ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021" Oct 13 09:05:19 crc 
kubenswrapper[4664]: I1013 09:05:19.364132 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021"} err="failed to get container status \"ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021\": rpc error: code = NotFound desc = could not find container \"ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021\": container with ID starting with ebd68cc8feb2fd7c48c97a935c5873045314445c19cb2b37097d5f2480f3c021 not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.364148 4664 scope.go:117] "RemoveContainer" containerID="f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.371200 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a011d78-383e-4c8a-89fd-3bdf7fe087f3" (UID: "3a011d78-383e-4c8a-89fd-3bdf7fe087f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.387499 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqvwb\" (UniqueName: \"kubernetes.io/projected/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-kube-api-access-zqvwb\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.387533 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.387546 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a011d78-383e-4c8a-89fd-3bdf7fe087f3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.404535 4664 scope.go:117] "RemoveContainer" containerID="59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.424364 4664 scope.go:117] "RemoveContainer" containerID="31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.470270 4664 scope.go:117] "RemoveContainer" containerID="f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.470759 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252\": container with ID starting with f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252 not found: ID does not exist" containerID="f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.470813 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252"} err="failed to get container status \"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252\": rpc error: code = NotFound desc = could not find container \"f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252\": container with ID starting with 
f9cd814f9eee6c4ca3c1ecc31367fa6c9648c70c423cc63282ae8129c510b252 not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.470841 4664 scope.go:117] "RemoveContainer" containerID="59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.471323 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e\": container with ID starting with 59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e not found: ID does not exist" containerID="59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.471352 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e"} err="failed to get container status \"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e\": rpc error: code = NotFound desc = could not find container \"59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e\": container with ID starting with 59ddbc51036f107055ebfec260c67fb73f66629e3ac7abef1d97f8bd8ec8a95e not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.471370 4664 scope.go:117] "RemoveContainer" containerID="31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2" Oct 13 09:05:19 crc kubenswrapper[4664]: E1013 09:05:19.471583 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2\": container with ID starting with 31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2 not found: ID does not exist" containerID="31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.471603 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2"} err="failed to get container status \"31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2\": rpc error: code = NotFound desc = could not find container \"31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2\": container with ID starting with 31aa5167ebc3e18dfe04f229b1dd58afbf441e0e40a48d847258fc623f01a6e2 not found: ID does not exist" Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.569183 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:05:19 crc kubenswrapper[4664]: I1013 09:05:19.577225 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-md5fn"] Oct 13 09:05:21 crc kubenswrapper[4664]: I1013 09:05:21.063330 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" path="/var/lib/kubelet/pods/08a9405b-9db9-4ae0-b25c-21ac477f565b/volumes" Oct 13 09:05:21 crc kubenswrapper[4664]: I1013 09:05:21.064436 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" path="/var/lib/kubelet/pods/3a011d78-383e-4c8a-89fd-3bdf7fe087f3/volumes" Oct 13 09:05:32 crc kubenswrapper[4664]: I1013 09:05:32.047094 4664 scope.go:117] "RemoveContainer" 
containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:05:32 crc kubenswrapper[4664]: E1013 09:05:32.047935 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:05:43 crc kubenswrapper[4664]: I1013 09:05:43.054658 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:05:43 crc kubenswrapper[4664]: E1013 09:05:43.055469 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:05:55 crc kubenswrapper[4664]: I1013 09:05:55.628482 4664 generic.go:334] "Generic (PLEG): container finished" podID="3be173c2-e112-49d2-8b3b-b4cd0ed730fb" containerID="54adc3e1d4ab387c6c872ed367c3a6d8f70ce603c57fd3e987b49b4e46a1de8b" exitCode=1 Oct 13 09:05:55 crc kubenswrapper[4664]: I1013 09:05:55.628707 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"3be173c2-e112-49d2-8b3b-b4cd0ed730fb","Type":"ContainerDied","Data":"54adc3e1d4ab387c6c872ed367c3a6d8f70ce603c57fd3e987b49b4e46a1de8b"} Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.453770 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.642857 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.642908 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.642999 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643031 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643055 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643090 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wcts\" (UniqueName: \"kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643173 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643210 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.643343 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config\") pod \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\" (UID: \"3be173c2-e112-49d2-8b3b-b4cd0ed730fb\") " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.645431 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary" 
(OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.649255 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.650289 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data" (OuterVolumeSpecName: "config-data") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.650383 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts" (OuterVolumeSpecName: "kube-api-access-9wcts") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "kube-api-access-9wcts". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.654001 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "test-operator-logs") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.659004 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"3be173c2-e112-49d2-8b3b-b4cd0ed730fb","Type":"ContainerDied","Data":"1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc"} Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.659166 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1880515aae867626d7b48b49c6a78dbec67138717679411bbedaf432f471e6bc" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.659117 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.675236 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.683011 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.684606 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.702565 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "3be173c2-e112-49d2-8b3b-b4cd0ed730fb" (UID: "3be173c2-e112-49d2-8b3b-b4cd0ed730fb"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746107 4664 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-config-data\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746155 4664 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ca-certs\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746168 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746182 4664 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746879 4664 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746903 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wcts\" (UniqueName: \"kubernetes.io/projected/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-kube-api-access-9wcts\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746926 4664 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746937 4664 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-ssh-key\") on node 
\"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.746951 4664 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3be173c2-e112-49d2-8b3b-b4cd0ed730fb-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.772998 4664 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Oct 13 09:05:57 crc kubenswrapper[4664]: I1013 09:05:57.848959 4664 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Oct 13 09:05:58 crc kubenswrapper[4664]: I1013 09:05:58.047780 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:05:58 crc kubenswrapper[4664]: E1013 09:05:58.048111 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.686765 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.688588 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="extract-content" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691026 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="extract-content" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691090 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="extract-utilities" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691105 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="extract-utilities" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691154 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691168 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691190 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="extract-content" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691202 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="extract-content" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691259 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3be173c2-e112-49d2-8b3b-b4cd0ed730fb" containerName="tempest-tests-tempest-tests-runner" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691271 4664 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="3be173c2-e112-49d2-8b3b-b4cd0ed730fb" containerName="tempest-tests-tempest-tests-runner" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691288 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691302 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: E1013 09:06:07.691320 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="extract-utilities" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.691332 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="extract-utilities" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.692516 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3be173c2-e112-49d2-8b3b-b4cd0ed730fb" containerName="tempest-tests-tempest-tests-runner" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.692605 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="08a9405b-9db9-4ae0-b25c-21ac477f565b" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.692645 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a011d78-383e-4c8a-89fd-3bdf7fe087f3" containerName="registry-server" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.694034 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.705084 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.720496 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-zpzpx" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.814763 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.814845 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxjqc\" (UniqueName: \"kubernetes.io/projected/95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2-kube-api-access-hxjqc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.916439 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.916495 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-hxjqc\" (UniqueName: \"kubernetes.io/projected/95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2-kube-api-access-hxjqc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.919770 4664 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.940854 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxjqc\" (UniqueName: \"kubernetes.io/projected/95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2-kube-api-access-hxjqc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:07 crc kubenswrapper[4664]: I1013 09:06:07.958673 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:08 crc kubenswrapper[4664]: I1013 09:06:08.029326 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Oct 13 09:06:08 crc kubenswrapper[4664]: I1013 09:06:08.507761 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Oct 13 09:06:08 crc kubenswrapper[4664]: I1013 09:06:08.773517 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2","Type":"ContainerStarted","Data":"53d7edd07cab7228ed8d1fbcdc5148ef4467d51268e17fa307c4bec30ef07c06"} Oct 13 09:06:10 crc kubenswrapper[4664]: I1013 09:06:10.047621 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:06:10 crc kubenswrapper[4664]: E1013 09:06:10.048329 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:06:10 crc kubenswrapper[4664]: I1013 09:06:10.801773 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2","Type":"ContainerStarted","Data":"42f19666564070749974b534162229a3e55ab18eb9f560bcab129170cc98e8c7"} Oct 13 09:06:10 crc kubenswrapper[4664]: I1013 09:06:10.834505 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.654371728 podStartE2EDuration="3.834474879s" podCreationTimestamp="2025-10-13 09:06:07 +0000 UTC" firstStartedPulling="2025-10-13 09:06:08.506157803 +0000 UTC m=+8376.193603045" lastFinishedPulling="2025-10-13 09:06:09.686260994 +0000 UTC m=+8377.373706196" observedRunningTime="2025-10-13 09:06:10.82855715 +0000 UTC m=+8378.516002362" watchObservedRunningTime="2025-10-13 09:06:10.834474879 +0000 UTC m=+8378.521920111" Oct 13 09:06:21 crc kubenswrapper[4664]: I1013 09:06:21.047501 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:06:21 crc kubenswrapper[4664]: E1013 09:06:21.048860 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.152514 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.156632 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.167558 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.247265 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nmsl\" (UniqueName: \"kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.247318 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.247405 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.348864 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.349071 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nmsl\" (UniqueName: 
\"kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.349108 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.349649 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.349710 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.375724 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nmsl\" (UniqueName: \"kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl\") pod \"community-operators-d8ddb\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:31 crc kubenswrapper[4664]: I1013 09:06:31.481756 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:32 crc kubenswrapper[4664]: W1013 09:06:32.134307 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod060766c6_e251_4810_b741_0b30f3bf7d85.slice/crio-e3f319618fb2e3c6e8b3a6b82390e1e49dbe35819ecc2d5503aed28eb9e51efd WatchSource:0}: Error finding container e3f319618fb2e3c6e8b3a6b82390e1e49dbe35819ecc2d5503aed28eb9e51efd: Status 404 returned error can't find the container with id e3f319618fb2e3c6e8b3a6b82390e1e49dbe35819ecc2d5503aed28eb9e51efd Oct 13 09:06:32 crc kubenswrapper[4664]: I1013 09:06:32.135194 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:33 crc kubenswrapper[4664]: I1013 09:06:33.072134 4664 generic.go:334] "Generic (PLEG): container finished" podID="060766c6-e251-4810-b741-0b30f3bf7d85" containerID="371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db" exitCode=0 Oct 13 09:06:33 crc kubenswrapper[4664]: I1013 09:06:33.072590 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerDied","Data":"371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db"} Oct 13 09:06:33 crc kubenswrapper[4664]: I1013 09:06:33.073716 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerStarted","Data":"e3f319618fb2e3c6e8b3a6b82390e1e49dbe35819ecc2d5503aed28eb9e51efd"} Oct 13 09:06:34 crc kubenswrapper[4664]: I1013 09:06:34.084815 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerStarted","Data":"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161"} Oct 13 09:06:35 crc kubenswrapper[4664]: I1013 09:06:35.047109 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:06:35 crc kubenswrapper[4664]: E1013 09:06:35.047642 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:06:35 crc kubenswrapper[4664]: I1013 09:06:35.095761 4664 generic.go:334] "Generic (PLEG): container finished" podID="060766c6-e251-4810-b741-0b30f3bf7d85" containerID="36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161" exitCode=0 Oct 13 09:06:35 crc kubenswrapper[4664]: I1013 09:06:35.095823 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerDied","Data":"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161"} Oct 13 09:06:37 crc kubenswrapper[4664]: I1013 09:06:37.131158 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" 
event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerStarted","Data":"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854"} Oct 13 09:06:37 crc kubenswrapper[4664]: I1013 09:06:37.151621 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d8ddb" podStartSLOduration=3.10178364 podStartE2EDuration="6.151599181s" podCreationTimestamp="2025-10-13 09:06:31 +0000 UTC" firstStartedPulling="2025-10-13 09:06:33.074358919 +0000 UTC m=+8400.761804121" lastFinishedPulling="2025-10-13 09:06:36.12417446 +0000 UTC m=+8403.811619662" observedRunningTime="2025-10-13 09:06:37.150157872 +0000 UTC m=+8404.837603064" watchObservedRunningTime="2025-10-13 09:06:37.151599181 +0000 UTC m=+8404.839044373" Oct 13 09:06:41 crc kubenswrapper[4664]: I1013 09:06:41.483138 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:41 crc kubenswrapper[4664]: I1013 09:06:41.483985 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:41 crc kubenswrapper[4664]: I1013 09:06:41.544419 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:42 crc kubenswrapper[4664]: I1013 09:06:42.254625 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:42 crc kubenswrapper[4664]: I1013 09:06:42.339102 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.217067 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d8ddb" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="registry-server" containerID="cri-o://ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854" gracePeriod=2 Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.747667 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.884405 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities\") pod \"060766c6-e251-4810-b741-0b30f3bf7d85\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.884639 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nmsl\" (UniqueName: \"kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl\") pod \"060766c6-e251-4810-b741-0b30f3bf7d85\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.884882 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content\") pod \"060766c6-e251-4810-b741-0b30f3bf7d85\" (UID: \"060766c6-e251-4810-b741-0b30f3bf7d85\") " Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.885594 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities" (OuterVolumeSpecName: "utilities") pod "060766c6-e251-4810-b741-0b30f3bf7d85" (UID: "060766c6-e251-4810-b741-0b30f3bf7d85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.885736 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.891057 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl" (OuterVolumeSpecName: "kube-api-access-6nmsl") pod "060766c6-e251-4810-b741-0b30f3bf7d85" (UID: "060766c6-e251-4810-b741-0b30f3bf7d85"). InnerVolumeSpecName "kube-api-access-6nmsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.968036 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "060766c6-e251-4810-b741-0b30f3bf7d85" (UID: "060766c6-e251-4810-b741-0b30f3bf7d85"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.987999 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nmsl\" (UniqueName: \"kubernetes.io/projected/060766c6-e251-4810-b741-0b30f3bf7d85-kube-api-access-6nmsl\") on node \"crc\" DevicePath \"\"" Oct 13 09:06:44 crc kubenswrapper[4664]: I1013 09:06:44.988251 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060766c6-e251-4810-b741-0b30f3bf7d85-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.230916 4664 generic.go:334] "Generic (PLEG): container finished" podID="060766c6-e251-4810-b741-0b30f3bf7d85" containerID="ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854" exitCode=0 Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.230973 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerDied","Data":"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854"} Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.231004 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d8ddb" event={"ID":"060766c6-e251-4810-b741-0b30f3bf7d85","Type":"ContainerDied","Data":"e3f319618fb2e3c6e8b3a6b82390e1e49dbe35819ecc2d5503aed28eb9e51efd"} Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.231023 4664 scope.go:117] "RemoveContainer" containerID="ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.231129 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d8ddb" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.256553 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.267632 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d8ddb"] Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.278402 4664 scope.go:117] "RemoveContainer" containerID="36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.302595 4664 scope.go:117] "RemoveContainer" containerID="371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.343904 4664 scope.go:117] "RemoveContainer" containerID="ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854" Oct 13 09:06:45 crc kubenswrapper[4664]: E1013 09:06:45.344473 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854\": container with ID starting with ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854 not found: ID does not exist" containerID="ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.344507 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854"} err="failed to get container status \"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854\": rpc error: code = NotFound desc = could not find container \"ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854\": container with ID starting with ba225075d9b4ea0489fb5142bcaf7d771aa5a5f550d3b8e71b8f0d564810d854 not found: ID does not exist" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.344527 4664 scope.go:117] "RemoveContainer" containerID="36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161" Oct 13 09:06:45 crc kubenswrapper[4664]: E1013 09:06:45.344896 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161\": container with ID starting with 36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161 not found: ID does not exist" containerID="36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.344915 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161"} err="failed to get container status \"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161\": rpc error: code = NotFound desc = could not find container \"36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161\": container with ID starting with 36d5a4d1de087bf53ff3234d06f53f669adb603ef9431b5bc58b947542613161 not found: ID does not exist" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.344928 4664 scope.go:117] "RemoveContainer" containerID="371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db" Oct 13 09:06:45 crc kubenswrapper[4664]: E1013 09:06:45.345216 4664 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db\": container with ID starting with 371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db not found: ID does not exist" containerID="371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db" Oct 13 09:06:45 crc kubenswrapper[4664]: I1013 09:06:45.345274 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db"} err="failed to get container status \"371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db\": rpc error: code = NotFound desc = could not find container \"371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db\": container with ID starting with 371e9d35b8e446f0800021a98f394afb3b3b8200830f5ff77fb5efb4a7a3b9db not found: ID does not exist" Oct 13 09:06:47 crc kubenswrapper[4664]: I1013 09:06:47.060023 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" path="/var/lib/kubelet/pods/060766c6-e251-4810-b741-0b30f3bf7d85/volumes" Oct 13 09:06:47 crc kubenswrapper[4664]: I1013 09:06:47.089326 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-7445f97b5f-k8zxs" podUID="b4132324-29a1-4d67-91a2-9b7ec6a7c960" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Oct 13 09:06:49 crc kubenswrapper[4664]: I1013 09:06:49.052519 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:06:49 crc kubenswrapper[4664]: E1013 09:06:49.059885 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.394884 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-2dg27/must-gather-xxjdz"] Oct 13 09:06:53 crc kubenswrapper[4664]: E1013 09:06:53.395595 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="extract-content" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.395606 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="extract-content" Oct 13 09:06:53 crc kubenswrapper[4664]: E1013 09:06:53.395630 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="registry-server" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.395636 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="registry-server" Oct 13 09:06:53 crc kubenswrapper[4664]: E1013 09:06:53.395661 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="extract-utilities" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.395667 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="extract-utilities" Oct 13 09:06:53 crc 
kubenswrapper[4664]: I1013 09:06:53.395868 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="060766c6-e251-4810-b741-0b30f3bf7d85" containerName="registry-server" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.396813 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.402850 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-2dg27"/"default-dockercfg-62jw4" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.404542 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-2dg27"/"openshift-service-ca.crt" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.404605 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-2dg27"/"kube-root-ca.crt" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.415845 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-2dg27/must-gather-xxjdz"] Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.484514 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.484923 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcnj8\" (UniqueName: \"kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.587148 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.587264 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcnj8\" (UniqueName: \"kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.587629 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 09:06:53.608327 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcnj8\" (UniqueName: \"kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8\") pod \"must-gather-xxjdz\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:53 crc kubenswrapper[4664]: I1013 
09:06:53.713155 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:06:54 crc kubenswrapper[4664]: I1013 09:06:54.197478 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-2dg27/must-gather-xxjdz"] Oct 13 09:06:54 crc kubenswrapper[4664]: I1013 09:06:54.344200 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/must-gather-xxjdz" event={"ID":"12962c73-4eee-44a6-8382-3c2bb82bb73f","Type":"ContainerStarted","Data":"b0edad4a9375723a3151bb9c7d581ccaeda11cc2b2379be6530a4f60dff9d914"} Oct 13 09:07:00 crc kubenswrapper[4664]: I1013 09:07:00.047710 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:07:00 crc kubenswrapper[4664]: E1013 09:07:00.048411 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:07:03 crc kubenswrapper[4664]: I1013 09:07:03.440601 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/must-gather-xxjdz" event={"ID":"12962c73-4eee-44a6-8382-3c2bb82bb73f","Type":"ContainerStarted","Data":"4c9a5831506c57e5a6da7836c9fd17859c8fd672fb21ed83c6f3bd1f97d7d167"} Oct 13 09:07:03 crc kubenswrapper[4664]: I1013 09:07:03.441187 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/must-gather-xxjdz" event={"ID":"12962c73-4eee-44a6-8382-3c2bb82bb73f","Type":"ContainerStarted","Data":"991b4a86b3acf612768cbf2ef25e25aa847979b4bdf8a06c457a6e34eb4c8150"} Oct 13 09:07:03 crc kubenswrapper[4664]: I1013 09:07:03.464297 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-2dg27/must-gather-xxjdz" podStartSLOduration=2.014440387 podStartE2EDuration="10.464275502s" podCreationTimestamp="2025-10-13 09:06:53 +0000 UTC" firstStartedPulling="2025-10-13 09:06:54.217934758 +0000 UTC m=+8421.905379950" lastFinishedPulling="2025-10-13 09:07:02.667769873 +0000 UTC m=+8430.355215065" observedRunningTime="2025-10-13 09:07:03.459273288 +0000 UTC m=+8431.146718500" watchObservedRunningTime="2025-10-13 09:07:03.464275502 +0000 UTC m=+8431.151720704" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.520270 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-2dg27/crc-debug-dldz7"] Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.522693 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.575577 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.576210 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd9t9\" (UniqueName: \"kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.678579 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.678724 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd9t9\" (UniqueName: \"kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.679791 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.708550 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd9t9\" (UniqueName: \"kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9\") pod \"crc-debug-dldz7\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:09 crc kubenswrapper[4664]: I1013 09:07:09.846343 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:07:10 crc kubenswrapper[4664]: I1013 09:07:10.524387 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-dldz7" event={"ID":"ced2dd00-a9aa-4816-ae41-ec083404b6ca","Type":"ContainerStarted","Data":"293c760d4a8b5152a9150ce0f3282fa16070b0f3857343277662616fb8c47621"} Oct 13 09:07:15 crc kubenswrapper[4664]: I1013 09:07:15.048810 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:07:15 crc kubenswrapper[4664]: E1013 09:07:15.049471 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:07:23 crc kubenswrapper[4664]: I1013 09:07:23.651419 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-dldz7" event={"ID":"ced2dd00-a9aa-4816-ae41-ec083404b6ca","Type":"ContainerStarted","Data":"e5056c7136766814476d67417fa5193876c0c15f88e5a5b8063d10c02ae66ccd"} Oct 13 09:07:23 crc kubenswrapper[4664]: I1013 09:07:23.680645 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-2dg27/crc-debug-dldz7" podStartSLOduration=2.026596283 podStartE2EDuration="14.6806223s" podCreationTimestamp="2025-10-13 09:07:09 +0000 UTC" firstStartedPulling="2025-10-13 09:07:09.884969646 +0000 UTC m=+8437.572414838" lastFinishedPulling="2025-10-13 09:07:22.538995663 +0000 UTC m=+8450.226440855" observedRunningTime="2025-10-13 09:07:23.669272476 +0000 UTC m=+8451.356717688" watchObservedRunningTime="2025-10-13 09:07:23.6806223 +0000 UTC m=+8451.368067512" Oct 13 09:07:29 crc kubenswrapper[4664]: I1013 09:07:29.047008 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:07:30 crc kubenswrapper[4664]: I1013 09:07:30.723340 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4"} Oct 13 09:08:13 crc kubenswrapper[4664]: I1013 09:08:13.096060 4664 generic.go:334] "Generic (PLEG): container finished" podID="ced2dd00-a9aa-4816-ae41-ec083404b6ca" containerID="e5056c7136766814476d67417fa5193876c0c15f88e5a5b8063d10c02ae66ccd" exitCode=0 Oct 13 09:08:13 crc kubenswrapper[4664]: I1013 09:08:13.096144 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-dldz7" event={"ID":"ced2dd00-a9aa-4816-ae41-ec083404b6ca","Type":"ContainerDied","Data":"e5056c7136766814476d67417fa5193876c0c15f88e5a5b8063d10c02ae66ccd"} Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.252401 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.279873 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-dldz7"] Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.289458 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-dldz7"] Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.363982 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd9t9\" (UniqueName: \"kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9\") pod \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.364046 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host\") pod \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\" (UID: \"ced2dd00-a9aa-4816-ae41-ec083404b6ca\") " Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.364112 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host" (OuterVolumeSpecName: "host") pod "ced2dd00-a9aa-4816-ae41-ec083404b6ca" (UID: "ced2dd00-a9aa-4816-ae41-ec083404b6ca"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.364503 4664 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ced2dd00-a9aa-4816-ae41-ec083404b6ca-host\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.372346 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9" (OuterVolumeSpecName: "kube-api-access-sd9t9") pod "ced2dd00-a9aa-4816-ae41-ec083404b6ca" (UID: "ced2dd00-a9aa-4816-ae41-ec083404b6ca"). InnerVolumeSpecName "kube-api-access-sd9t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:08:14 crc kubenswrapper[4664]: I1013 09:08:14.466655 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd9t9\" (UniqueName: \"kubernetes.io/projected/ced2dd00-a9aa-4816-ae41-ec083404b6ca-kube-api-access-sd9t9\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.059345 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ced2dd00-a9aa-4816-ae41-ec083404b6ca" path="/var/lib/kubelet/pods/ced2dd00-a9aa-4816-ae41-ec083404b6ca/volumes" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.115739 4664 scope.go:117] "RemoveContainer" containerID="e5056c7136766814476d67417fa5193876c0c15f88e5a5b8063d10c02ae66ccd" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.115811 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-dldz7" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.617997 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-2dg27/crc-debug-zfzzl"] Oct 13 09:08:15 crc kubenswrapper[4664]: E1013 09:08:15.618656 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ced2dd00-a9aa-4816-ae41-ec083404b6ca" containerName="container-00" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.618668 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="ced2dd00-a9aa-4816-ae41-ec083404b6ca" containerName="container-00" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.619330 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="ced2dd00-a9aa-4816-ae41-ec083404b6ca" containerName="container-00" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.620640 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.687963 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v6t6\" (UniqueName: \"kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.688142 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.790601 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.790914 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v6t6\" (UniqueName: \"kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.790769 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.816561 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v6t6\" (UniqueName: \"kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6\") pod \"crc-debug-zfzzl\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:15 crc kubenswrapper[4664]: I1013 09:08:15.943569 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:16 crc kubenswrapper[4664]: I1013 09:08:16.125551 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" event={"ID":"af1690d8-b82b-4558-a28e-457cc43373ea","Type":"ContainerStarted","Data":"4382792f23b3277ea451ce48cd7d09374db3e92dff8389093f471ab3898891e3"} Oct 13 09:08:17 crc kubenswrapper[4664]: I1013 09:08:17.134926 4664 generic.go:334] "Generic (PLEG): container finished" podID="af1690d8-b82b-4558-a28e-457cc43373ea" containerID="5f3f818f54c22f59a1df11e01685f32cea3cb231ca2bf6d89129e67c7931a7f7" exitCode=0 Oct 13 09:08:17 crc kubenswrapper[4664]: I1013 09:08:17.135027 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" event={"ID":"af1690d8-b82b-4558-a28e-457cc43373ea","Type":"ContainerDied","Data":"5f3f818f54c22f59a1df11e01685f32cea3cb231ca2bf6d89129e67c7931a7f7"} Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.272233 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.338420 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host\") pod \"af1690d8-b82b-4558-a28e-457cc43373ea\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.338520 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host" (OuterVolumeSpecName: "host") pod "af1690d8-b82b-4558-a28e-457cc43373ea" (UID: "af1690d8-b82b-4558-a28e-457cc43373ea"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.338540 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v6t6\" (UniqueName: \"kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6\") pod \"af1690d8-b82b-4558-a28e-457cc43373ea\" (UID: \"af1690d8-b82b-4558-a28e-457cc43373ea\") " Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.338976 4664 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/af1690d8-b82b-4558-a28e-457cc43373ea-host\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.361582 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6" (OuterVolumeSpecName: "kube-api-access-6v6t6") pod "af1690d8-b82b-4558-a28e-457cc43373ea" (UID: "af1690d8-b82b-4558-a28e-457cc43373ea"). InnerVolumeSpecName "kube-api-access-6v6t6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:08:18 crc kubenswrapper[4664]: I1013 09:08:18.449233 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v6t6\" (UniqueName: \"kubernetes.io/projected/af1690d8-b82b-4558-a28e-457cc43373ea-kube-api-access-6v6t6\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:19 crc kubenswrapper[4664]: I1013 09:08:19.097323 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-zfzzl"] Oct 13 09:08:19 crc kubenswrapper[4664]: I1013 09:08:19.104611 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-zfzzl"] Oct 13 09:08:19 crc kubenswrapper[4664]: I1013 09:08:19.159315 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4382792f23b3277ea451ce48cd7d09374db3e92dff8389093f471ab3898891e3" Oct 13 09:08:19 crc kubenswrapper[4664]: I1013 09:08:19.159435 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-zfzzl" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.283569 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-2dg27/crc-debug-glplm"] Oct 13 09:08:20 crc kubenswrapper[4664]: E1013 09:08:20.285473 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af1690d8-b82b-4558-a28e-457cc43373ea" containerName="container-00" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.285584 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="af1690d8-b82b-4558-a28e-457cc43373ea" containerName="container-00" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.285951 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="af1690d8-b82b-4558-a28e-457cc43373ea" containerName="container-00" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.286845 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.380285 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4kv6\" (UniqueName: \"kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.380377 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.482291 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4kv6\" (UniqueName: \"kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.482381 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.482830 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.501646 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4kv6\" (UniqueName: \"kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6\") pod \"crc-debug-glplm\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: I1013 09:08:20.602479 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:20 crc kubenswrapper[4664]: W1013 09:08:20.642329 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd11ad2fb_6b93_4c53_a69d_2e1470c3bc9d.slice/crio-369025752e2e768902c58412deef679b05ef3972aa1b355ac422bac09ad1b9dd WatchSource:0}: Error finding container 369025752e2e768902c58412deef679b05ef3972aa1b355ac422bac09ad1b9dd: Status 404 returned error can't find the container with id 369025752e2e768902c58412deef679b05ef3972aa1b355ac422bac09ad1b9dd Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.057914 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af1690d8-b82b-4558-a28e-457cc43373ea" path="/var/lib/kubelet/pods/af1690d8-b82b-4558-a28e-457cc43373ea/volumes" Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.175759 4664 generic.go:334] "Generic (PLEG): container finished" podID="d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" containerID="9c35fb91472b8b178b8c64bac1f80b0809178592ebb06d777958cb5686a5ed1e" exitCode=0 Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.175809 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-glplm" event={"ID":"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d","Type":"ContainerDied","Data":"9c35fb91472b8b178b8c64bac1f80b0809178592ebb06d777958cb5686a5ed1e"} Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.175839 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/crc-debug-glplm" event={"ID":"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d","Type":"ContainerStarted","Data":"369025752e2e768902c58412deef679b05ef3972aa1b355ac422bac09ad1b9dd"} Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.217249 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-glplm"] Oct 13 09:08:21 crc kubenswrapper[4664]: I1013 09:08:21.225497 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-2dg27/crc-debug-glplm"] Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.308173 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.418647 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4kv6\" (UniqueName: \"kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6\") pod \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.418731 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host\") pod \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\" (UID: \"d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d\") " Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.418920 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host" (OuterVolumeSpecName: "host") pod "d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" (UID: "d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.419498 4664 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-host\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.427948 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6" (OuterVolumeSpecName: "kube-api-access-x4kv6") pod "d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" (UID: "d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d"). InnerVolumeSpecName "kube-api-access-x4kv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:08:22 crc kubenswrapper[4664]: I1013 09:08:22.521472 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4kv6\" (UniqueName: \"kubernetes.io/projected/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d-kube-api-access-x4kv6\") on node \"crc\" DevicePath \"\"" Oct 13 09:08:23 crc kubenswrapper[4664]: I1013 09:08:23.067131 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" path="/var/lib/kubelet/pods/d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d/volumes" Oct 13 09:08:23 crc kubenswrapper[4664]: I1013 09:08:23.202338 4664 scope.go:117] "RemoveContainer" containerID="9c35fb91472b8b178b8c64bac1f80b0809178592ebb06d777958cb5686a5ed1e" Oct 13 09:08:23 crc kubenswrapper[4664]: I1013 09:08:23.202364 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/crc-debug-glplm" Oct 13 09:08:25 crc kubenswrapper[4664]: I1013 09:08:25.865784 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-79f9447d46-mnb2r_576e5bbc-aadf-4530-9a7f-bcf8401874a6/barbican-api/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.096030 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-79f9447d46-mnb2r_576e5bbc-aadf-4530-9a7f-bcf8401874a6/barbican-api-log/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.389615 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-75bf6d8446-kj4kf_75f2bbf2-f367-4513-bb39-e403007183a8/barbican-keystone-listener/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.551460 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-75bf6d8446-kj4kf_75f2bbf2-f367-4513-bb39-e403007183a8/barbican-keystone-listener-log/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.590023 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5b55d75d57-5ft48_48a7c25d-db6e-46dd-8b73-c6269b23cb5e/barbican-worker/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.637469 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5b55d75d57-5ft48_48a7c25d-db6e-46dd-8b73-c6269b23cb5e/barbican-worker-log/0.log" Oct 13 09:08:26 crc kubenswrapper[4664]: I1013 09:08:26.916949 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-sfcs6_5f1b4f59-a38d-41bc-8f1b-92479be8b4b1/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.045509 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_851d9da2-ec75-41a8-8201-11601fe995d6/ceilometer-central-agent/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.205380 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_851d9da2-ec75-41a8-8201-11601fe995d6/ceilometer-notification-agent/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.522677 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_851d9da2-ec75-41a8-8201-11601fe995d6/proxy-httpd/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.526495 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_851d9da2-ec75-41a8-8201-11601fe995d6/sg-core/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.784660 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2f726ffb-02b8-4628-b128-1422badcf6ae/cinder-api/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.823544 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2f726ffb-02b8-4628-b128-1422badcf6ae/cinder-api-log/0.log" Oct 13 09:08:27 crc kubenswrapper[4664]: I1013 09:08:27.968568 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_fd97ddf9-5f06-4c91-af04-42c116fac89d/cinder-scheduler/1.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.122613 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_fd97ddf9-5f06-4c91-af04-42c116fac89d/cinder-scheduler/0.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.223966 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_fd97ddf9-5f06-4c91-af04-42c116fac89d/probe/0.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.257136 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-rstts_a0c2c2ed-c303-4f67-b8fc-fb13f3218090/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.533944 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-sbq9r_69834621-9486-4b28-b964-bfb76e7f5a71/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.733608 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-txl6z_776354d2-5430-4d54-b3d2-4d7f1880f8bf/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:28 crc kubenswrapper[4664]: I1013 09:08:28.904451 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6d8755c899-zzh4h_b5ff54ae-da3a-4f2a-9931-9cd0834e4672/init/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.084361 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6d8755c899-zzh4h_b5ff54ae-da3a-4f2a-9931-9cd0834e4672/init/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.341009 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6d8755c899-zzh4h_b5ff54ae-da3a-4f2a-9931-9cd0834e4672/dnsmasq-dns/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.385948 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-t56l2_fd7254f1-5310-490d-bf9d-ad81da0a7fb1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.639490 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_1c5e42f4-75e5-4e9e-97b6-dda31c400142/glance-log/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.643422 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_1c5e42f4-75e5-4e9e-97b6-dda31c400142/glance-httpd/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.885425 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_97b5bee2-4876-4d80-97cf-53c8d91521d9/glance-log/0.log" Oct 13 09:08:29 crc kubenswrapper[4664]: I1013 09:08:29.898222 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_97b5bee2-4876-4d80-97cf-53c8d91521d9/glance-httpd/0.log" Oct 13 09:08:30 crc kubenswrapper[4664]: I1013 09:08:30.309301 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-9c86db6bb-5dfhm_d2d2de6d-b7be-4b17-a73c-33d7dacf228c/heat-engine/0.log" Oct 13 09:08:30 crc kubenswrapper[4664]: I1013 09:08:30.773859 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7d78c558d-rjg4v_786f35fd-a7cc-4749-bc5e-47c28ffa4245/horizon/2.log" Oct 13 09:08:30 crc kubenswrapper[4664]: I1013 09:08:30.791653 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7d78c558d-rjg4v_786f35fd-a7cc-4749-bc5e-47c28ffa4245/horizon/3.log" Oct 13 09:08:31 crc kubenswrapper[4664]: I1013 09:08:31.205280 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-h4q27_815f680a-a040-4f4d-acf4-ce60901f32f2/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:31 crc kubenswrapper[4664]: I1013 09:08:31.220399 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-66b9b87ff8-xsb2c_23693d9c-d47d-406d-864d-fa1c94b8f381/heat-api/0.log" Oct 13 09:08:31 crc kubenswrapper[4664]: I1013 09:08:31.520846 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sdqq6_a92d9f52-f23c-48e8-a74a-cc9b68d5c362/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:31 crc kubenswrapper[4664]: I1013 09:08:31.541826 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-564db64b9c-vkx7h_10a9562e-4b1b-4183-bb7a-c3f934109c1d/heat-cfnapi/0.log" Oct 13 09:08:31 crc kubenswrapper[4664]: I1013 09:08:31.925602 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29339041-6qn7p_41f22bd0-6f2c-4c59-838a-1cfe57b94ca9/keystone-cron/0.log" Oct 13 09:08:32 crc kubenswrapper[4664]: I1013 09:08:32.141234 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29339101-zkldc_eca6f5d0-1798-4440-a991-f02266967bb0/keystone-cron/0.log" Oct 13 09:08:32 crc kubenswrapper[4664]: I1013 09:08:32.191653 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_43ff3dc8-8a2c-4051-813b-69406ed7359e/kube-state-metrics/0.log" Oct 13 09:08:32 crc kubenswrapper[4664]: I1013 09:08:32.285980 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-7d78c558d-rjg4v_786f35fd-a7cc-4749-bc5e-47c28ffa4245/horizon-log/0.log" Oct 13 09:08:32 crc kubenswrapper[4664]: I1013 09:08:32.519893 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-j5lxs_0f92f225-d9b3-4d85-967e-b878358e05e4/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:33 crc kubenswrapper[4664]: I1013 09:08:33.031514 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86c94b664f-jbp5z_086d544e-1901-4bfc-b170-54ac640f25ee/neutron-httpd/0.log" Oct 13 09:08:33 crc kubenswrapper[4664]: I1013 09:08:33.263190 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-cgs44_8977ee61-f509-4bc9-bd69-ebefbfdafda9/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:33 crc kubenswrapper[4664]: I1013 09:08:33.584074 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6559fb6d89-x6txc_69ae4f17-d740-411e-8cb2-45afda327f7e/keystone-api/0.log" Oct 13 09:08:34 crc kubenswrapper[4664]: I1013 09:08:34.418511 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86c94b664f-jbp5z_086d544e-1901-4bfc-b170-54ac640f25ee/neutron-api/0.log" Oct 13 09:08:34 crc kubenswrapper[4664]: I1013 09:08:34.492937 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_1e95e8ed-be4d-4f4e-ad35-3e0314d294f7/nova-cell0-conductor-conductor/0.log" Oct 13 09:08:34 crc kubenswrapper[4664]: I1013 09:08:34.913537 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_4fa7f807-537d-4f7f-8ef4-a0f61358ec92/nova-cell1-conductor-conductor/0.log" Oct 13 09:08:35 crc kubenswrapper[4664]: I1013 09:08:35.371169 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_0aeab06b-f1e7-4479-90f9-726b28672eaa/nova-cell1-novncproxy-novncproxy/0.log" Oct 13 09:08:35 crc kubenswrapper[4664]: I1013 09:08:35.574533 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-qg68p_85584bbe-e8c0-4359-a828-731d3bc2cd5f/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:35 crc kubenswrapper[4664]: I1013 09:08:35.784166 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769/nova-api-log/0.log" Oct 13 09:08:36 crc kubenswrapper[4664]: I1013 09:08:36.104350 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7276f420-9ae8-46b9-a55f-903114d2f25c/nova-metadata-log/0.log" Oct 13 09:08:36 crc kubenswrapper[4664]: I1013 09:08:36.632314 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931/mysql-bootstrap/0.log" Oct 13 09:08:36 crc kubenswrapper[4664]: I1013 09:08:36.853205 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931/mysql-bootstrap/0.log" Oct 13 09:08:36 crc kubenswrapper[4664]: I1013 09:08:36.893581 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a0e3dfbe-5a9a-49f7-b6eb-ff53f02cb769/nova-api-api/0.log" Oct 13 09:08:36 crc kubenswrapper[4664]: I1013 09:08:36.941200 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-scheduler-0_b4ae1d1c-38d6-4893-955d-c0d736f6b32f/nova-scheduler-scheduler/0.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.080306 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931/galera/1.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.171221 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a5d8fbdf-76bc-48d9-83b9-9d3ff6d58931/galera/0.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.328871 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc0510b6-15a8-4d1a-93c3-f92869340539/mysql-bootstrap/0.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.599460 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc0510b6-15a8-4d1a-93c3-f92869340539/galera/1.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.622089 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc0510b6-15a8-4d1a-93c3-f92869340539/mysql-bootstrap/0.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.626197 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc0510b6-15a8-4d1a-93c3-f92869340539/galera/0.log" Oct 13 09:08:37 crc kubenswrapper[4664]: I1013 09:08:37.948441 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_26880f4a-2c23-4107-86b6-937a82c2fcb1/openstackclient/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.092919 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-47b95_7c8ef9b2-22fd-4d00-b710-8e22b4fefecf/ovn-controller/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.193632 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-jdzlh_849abee9-7bc4-4b49-82d3-a98fd5f192f4/openstack-network-exporter/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.419642 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frzn7_e96bba9a-6e2d-49e9-9543-a58d6c5de1fb/ovsdb-server-init/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.734791 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frzn7_e96bba9a-6e2d-49e9-9543-a58d6c5de1fb/ovsdb-server/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.820909 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frzn7_e96bba9a-6e2d-49e9-9543-a58d6c5de1fb/ovs-vswitchd/0.log" Oct 13 09:08:38 crc kubenswrapper[4664]: I1013 09:08:38.833942 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-frzn7_e96bba9a-6e2d-49e9-9543-a58d6c5de1fb/ovsdb-server-init/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.101077 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-gt52t_f741ab28-9784-40cc-a21b-858af4ffb104/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.284355 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1cfedf4f-ec80-462c-a8a3-cda5afa7e451/openstack-network-exporter/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.337260 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-northd-0_1cfedf4f-ec80-462c-a8a3-cda5afa7e451/ovn-northd/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.540112 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9294acab-d17a-4e3b-bf85-19429846ca0c/openstack-network-exporter/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.586896 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9294acab-d17a-4e3b-bf85-19429846ca0c/ovsdbserver-nb/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.755478 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4431fc75-3beb-408f-8981-ef409291bd2d/openstack-network-exporter/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.842215 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4431fc75-3beb-408f-8981-ef409291bd2d/ovsdbserver-sb/0.log" Oct 13 09:08:39 crc kubenswrapper[4664]: I1013 09:08:39.918179 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7276f420-9ae8-46b9-a55f-903114d2f25c/nova-metadata-metadata/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.428342 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_8e03819f-6d75-4465-908a-a3ab436ab132/setup-container/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.524454 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5b885874bd-tzm67_0ba8020c-0e48-4f3b-a4bc-646f201bfdef/placement-api/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.540587 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_8e03819f-6d75-4465-908a-a3ab436ab132/setup-container/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.671881 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_8e03819f-6d75-4465-908a-a3ab436ab132/rabbitmq/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.749486 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5b885874bd-tzm67_0ba8020c-0e48-4f3b-a4bc-646f201bfdef/placement-log/0.log" Oct 13 09:08:40 crc kubenswrapper[4664]: I1013 09:08:40.856253 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0c856173-8811-44d6-a72c-fd31966ef668/setup-container/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.117552 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0c856173-8811-44d6-a72c-fd31966ef668/setup-container/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.122421 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-8dcvw_3098a2cf-d529-4bd7-9b2a-03111b8e6e2c/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.142006 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0c856173-8811-44d6-a72c-fd31966ef668/rabbitmq/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.405864 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-bvnz6_63b39d04-1c3c-4854-9de5-aba98a83f9a7/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.504240 4664 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-f52hq_ef7305a7-edd9-4023-a761-713e870e85ce/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.729423 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-v66dv_50bfac0b-3fc4-47ba-8570-1b048e9d7ca4/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:41 crc kubenswrapper[4664]: I1013 09:08:41.816319 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-slrqm_63131591-3029-46be-b656-6e833036fa2d/ssh-known-hosts-edpm-deployment/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.175390 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7445f97b5f-k8zxs_b4132324-29a1-4d67-91a2-9b7ec6a7c960/proxy-server/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.248340 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-c9fnl_b1cb7b09-b7ec-4d03-b998-5442ec0ba9de/swift-ring-rebalance/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.389376 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7445f97b5f-k8zxs_b4132324-29a1-4d67-91a2-9b7ec6a7c960/proxy-httpd/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.470047 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/account-auditor/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.562071 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/account-reaper/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.729017 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/account-server/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.774013 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/container-auditor/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.821722 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/account-replicator/0.log" Oct 13 09:08:42 crc kubenswrapper[4664]: I1013 09:08:42.869501 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/container-replicator/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.012486 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/container-server/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.125159 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/object-auditor/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.126444 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/container-updater/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.158414 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/object-expirer/0.log" Oct 
13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.356148 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/object-server/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.425018 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/object-replicator/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.444174 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/object-updater/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.508518 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/rsync/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.618853 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_9953b8b0-42bc-4192-8791-d0564fa27f10/swift-recon-cron/0.log" Oct 13 09:08:43 crc kubenswrapper[4664]: I1013 09:08:43.793957 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-zcjht_618aef5f-9779-425d-9ce0-b827194143f4/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:44 crc kubenswrapper[4664]: I1013 09:08:44.264477 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_95f9f9fc-580c-4ebb-b04a-5946d4a2e7d2/test-operator-logs-container/0.log" Oct 13 09:08:44 crc kubenswrapper[4664]: I1013 09:08:44.265574 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest-s00-multi-thread-testing_67f690b7-0671-4a15-9d4d-1c65126e8a9a/tempest-tests-tempest-tests-runner/0.log" Oct 13 09:08:44 crc kubenswrapper[4664]: I1013 09:08:44.293654 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest-s01-single-thread-testing_3be173c2-e112-49d2-8b3b-b4cd0ed730fb/tempest-tests-tempest-tests-runner/0.log" Oct 13 09:08:44 crc kubenswrapper[4664]: I1013 09:08:44.527758 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-jrkpn_2e6b7add-eb7c-4a59-a8bf-9c5a61dd3979/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 13 09:08:54 crc kubenswrapper[4664]: I1013 09:08:54.625586 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_5f87e03e-f1be-4f12-a267-393fcde6e51e/memcached/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.159653 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-658bdf4b74-m4bpt_9f6b6a3e-5706-49fa-aafa-49f68b19997e/manager/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.177735 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-658bdf4b74-m4bpt_9f6b6a3e-5706-49fa-aafa-49f68b19997e/kube-rbac-proxy/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.353969 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/util/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.553563 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/pull/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.555187 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/util/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.567386 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/pull/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.758202 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/util/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.791444 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/pull/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.799682 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365bl7pnn_5b50ed9e-42f9-4a40-bdfb-61bc560256b5/extract/0.log" Oct 13 09:09:14 crc kubenswrapper[4664]: I1013 09:09:14.947101 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7b7fb68549-zptb2_3c368f1e-93f0-440a-ad95-d205dd78e4b2/kube-rbac-proxy/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.003462 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7b7fb68549-zptb2_3c368f1e-93f0-440a-ad95-d205dd78e4b2/manager/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.035334 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-85d5d9dd78-sl4lq_deda5c15-ffb4-44e6-9e27-465106737111/kube-rbac-proxy/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.188596 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-85d5d9dd78-sl4lq_deda5c15-ffb4-44e6-9e27-465106737111/manager/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.254853 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84b9b84486-8hnvm_68c3d701-56d2-4bce-8c6e-e1894084fecf/kube-rbac-proxy/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.346281 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84b9b84486-8hnvm_68c3d701-56d2-4bce-8c6e-e1894084fecf/manager/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.521964 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-858f76bbdd-ncsms_bd3c97ad-17c7-47d7-ae5e-1a67c489c142/kube-rbac-proxy/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.551636 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-858f76bbdd-ncsms_bd3c97ad-17c7-47d7-ae5e-1a67c489c142/manager/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 
09:09:15.700745 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7ffbcb7588-s6ftn_5daf4cb7-d305-4408-97d8-9645cd4e61d5/kube-rbac-proxy/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.749543 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7ffbcb7588-s6ftn_5daf4cb7-d305-4408-97d8-9645cd4e61d5/manager/0.log" Oct 13 09:09:15 crc kubenswrapper[4664]: I1013 09:09:15.850707 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-656bcbd775-22k5w_a096a4c5-5890-4100-8462-ec39d621ff38/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.053072 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-656bcbd775-22k5w_a096a4c5-5890-4100-8462-ec39d621ff38/manager/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.075418 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9c5c78d49-srnhl_de55f4c7-2413-4a00-8691-a9545525fc88/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.088584 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9c5c78d49-srnhl_de55f4c7-2413-4a00-8691-a9545525fc88/manager/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.258715 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-55b6b7c7b8-f57sl_bf55d8b4-9315-48b2-962c-318911833b6f/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.386771 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-55b6b7c7b8-f57sl_bf55d8b4-9315-48b2-962c-318911833b6f/manager/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.494431 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5f67fbc655-6mlnb_5367fac7-2b9f-4745-b3cb-4accdf26ef59/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.543181 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5f67fbc655-6mlnb_5367fac7-2b9f-4745-b3cb-4accdf26ef59/manager/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.630808 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f9fb45f8f-f2nxl_a76ae989-9e97-43ee-a38f-ebb30be19ab6/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.700064 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f9fb45f8f-f2nxl_a76ae989-9e97-43ee-a38f-ebb30be19ab6/manager/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.846480 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-79d585cb66-vxchx_91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026/kube-rbac-proxy/0.log" Oct 13 09:09:16 crc kubenswrapper[4664]: I1013 09:09:16.887183 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-79d585cb66-vxchx_91d09e1b-c6e8-4a4b-a4a6-ecb48fa42026/manager/0.log" Oct 13 09:09:17 crc 
kubenswrapper[4664]: I1013 09:09:17.034668 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5df598886f-mcst5_b28d60cb-ff14-4b64-b7b0-3af252c60311/kube-rbac-proxy/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.186640 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5df598886f-mcst5_b28d60cb-ff14-4b64-b7b0-3af252c60311/manager/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.291096 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69fdcfc5f5-nn6ql_ccadedcb-9722-4c86-9b22-17d4f9ce1cd7/kube-rbac-proxy/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.340346 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69fdcfc5f5-nn6ql_ccadedcb-9722-4c86-9b22-17d4f9ce1cd7/manager/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.418650 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5956dffb7b75c7q_24e25e0a-a138-41fb-b90e-08d800f751b4/kube-rbac-proxy/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.594101 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5956dffb7b75c7q_24e25e0a-a138-41fb-b90e-08d800f751b4/manager/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.654526 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5b95c8954b-w8mlm_6647aa13-3608-4eeb-87b7-26741b9c2a6f/kube-rbac-proxy/0.log" Oct 13 09:09:17 crc kubenswrapper[4664]: I1013 09:09:17.937439 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-688d597459-5n8bw_844ed68c-a79c-4751-98b2-d0459d583d06/kube-rbac-proxy/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.201116 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-688d597459-5n8bw_844ed68c-a79c-4751-98b2-d0459d583d06/operator/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.553878 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-bsghg_82047d93-7f79-495b-9e1a-380994104bb0/registry-server/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.693371 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-79df5fb58c-jfsd4_b371ef2b-6ffd-4759-8a02-279705b4a4d3/kube-rbac-proxy/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.933590 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-79df5fb58c-jfsd4_b371ef2b-6ffd-4759-8a02-279705b4a4d3/manager/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.988485 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-68b6c87b68-fl4ff_86b8e5cb-18d9-4931-afb3-4f8dc9f788f0/manager/0.log" Oct 13 09:09:18 crc kubenswrapper[4664]: I1013 09:09:18.992814 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_placement-operator-controller-manager-68b6c87b68-fl4ff_86b8e5cb-18d9-4931-afb3-4f8dc9f788f0/kube-rbac-proxy/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.077122 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5b95c8954b-w8mlm_6647aa13-3608-4eeb-87b7-26741b9c2a6f/manager/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.182606 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-bxvcx_3ee22f32-c675-449d-bdb9-670673cf57b4/operator/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.366385 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-db6d7f97b-6qvrk_0e469825-8f36-48dc-8dce-dcbdaf6ca58c/manager/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.377459 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-db6d7f97b-6qvrk_0e469825-8f36-48dc-8dce-dcbdaf6ca58c/kube-rbac-proxy/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.473883 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67cfc6749b-8mv7j_b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07/kube-rbac-proxy/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.632379 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67cfc6749b-8mv7j_b9fb4a10-d2a3-4fd7-9224-de5d7a5dea07/manager/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.708547 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5458f77c4-7vhzw_f837553e-b572-4dcc-91b4-a8e6c2deb097/manager/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.748890 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5458f77c4-7vhzw_f837553e-b572-4dcc-91b4-a8e6c2deb097/kube-rbac-proxy/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.868358 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7f554bff7b-798km_3ae920d7-c605-4984-a072-dad04b3cc6cc/kube-rbac-proxy/0.log" Oct 13 09:09:19 crc kubenswrapper[4664]: I1013 09:09:19.869803 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7f554bff7b-798km_3ae920d7-c605-4984-a072-dad04b3cc6cc/manager/0.log" Oct 13 09:09:35 crc kubenswrapper[4664]: I1013 09:09:35.984979 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jv45s_e67f1677-e72c-4ee7-ae5e-b1d80a6597fe/control-plane-machine-set-operator/0.log" Oct 13 09:09:36 crc kubenswrapper[4664]: I1013 09:09:36.125305 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-nhbkw_3881c61c-1e8b-437c-85b1-6bade9f8f4f9/kube-rbac-proxy/0.log" Oct 13 09:09:36 crc kubenswrapper[4664]: I1013 09:09:36.127021 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-nhbkw_3881c61c-1e8b-437c-85b1-6bade9f8f4f9/machine-api-operator/0.log" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.545877 4664 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:09:44 crc kubenswrapper[4664]: E1013 09:09:44.546664 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" containerName="container-00" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.546677 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" containerName="container-00" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.546850 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11ad2fb-6b93-4c53-a69d-2e1470c3bc9d" containerName="container-00" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.548228 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.571146 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.606735 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.606783 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgt96\" (UniqueName: \"kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.608045 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.710073 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.710133 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgt96\" (UniqueName: \"kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.710735 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 
09:09:44.711189 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.711716 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.730300 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgt96\" (UniqueName: \"kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96\") pod \"certified-operators-tcrzd\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:44 crc kubenswrapper[4664]: I1013 09:09:44.871129 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:45 crc kubenswrapper[4664]: I1013 09:09:45.793908 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:09:46 crc kubenswrapper[4664]: I1013 09:09:46.038255 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerStarted","Data":"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd"} Oct 13 09:09:46 crc kubenswrapper[4664]: I1013 09:09:46.038491 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerStarted","Data":"e6d6ceafd5cd7d239875c7862c4aa89713940d75ce4b79400af1dff4fd8e25ca"} Oct 13 09:09:46 crc kubenswrapper[4664]: I1013 09:09:46.039843 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 09:09:47 crc kubenswrapper[4664]: I1013 09:09:47.048067 4664 generic.go:334] "Generic (PLEG): container finished" podID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerID="5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd" exitCode=0 Oct 13 09:09:47 crc kubenswrapper[4664]: I1013 09:09:47.058316 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerDied","Data":"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd"} Oct 13 09:09:47 crc kubenswrapper[4664]: I1013 09:09:47.058373 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerStarted","Data":"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db"} Oct 13 09:09:48 crc kubenswrapper[4664]: I1013 09:09:48.609492 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-ggcmz_1979a4d1-7c0f-4eee-960e-66863300377d/cert-manager-controller/0.log" Oct 13 09:09:48 crc kubenswrapper[4664]: I1013 09:09:48.764414 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-nd2p7_201a4602-9190-430c-b6c4-52c0b6584883/cert-manager-cainjector/0.log" Oct 13 09:09:48 crc kubenswrapper[4664]: I1013 09:09:48.848917 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-5p4jw_4f2dd003-654f-4e3d-9fb9-cbea80c68acd/cert-manager-webhook/0.log" Oct 13 09:09:49 crc kubenswrapper[4664]: I1013 09:09:49.077572 4664 generic.go:334] "Generic (PLEG): container finished" podID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerID="bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db" exitCode=0 Oct 13 09:09:49 crc kubenswrapper[4664]: I1013 09:09:49.077613 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerDied","Data":"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db"} Oct 13 09:09:51 crc kubenswrapper[4664]: I1013 09:09:51.093692 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerStarted","Data":"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536"} Oct 13 09:09:51 crc kubenswrapper[4664]: I1013 09:09:51.113768 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tcrzd" podStartSLOduration=2.8333565849999998 podStartE2EDuration="7.112869462s" podCreationTimestamp="2025-10-13 09:09:44 +0000 UTC" firstStartedPulling="2025-10-13 09:09:46.039627356 +0000 UTC m=+8593.727072548" lastFinishedPulling="2025-10-13 09:09:50.319140233 +0000 UTC m=+8598.006585425" observedRunningTime="2025-10-13 09:09:51.111785374 +0000 UTC m=+8598.799230566" watchObservedRunningTime="2025-10-13 09:09:51.112869462 +0000 UTC m=+8598.800314654" Oct 13 09:09:54 crc kubenswrapper[4664]: I1013 09:09:54.871555 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:54 crc kubenswrapper[4664]: I1013 09:09:54.871981 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:09:55 crc kubenswrapper[4664]: I1013 09:09:55.920344 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-tcrzd" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="registry-server" probeResult="failure" output=< Oct 13 09:09:55 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:09:55 crc kubenswrapper[4664]: > Oct 13 09:09:58 crc kubenswrapper[4664]: I1013 09:09:58.812533 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:09:58 crc kubenswrapper[4664]: I1013 09:09:58.813911 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:10:03 crc kubenswrapper[4664]: I1013 09:10:03.752099 4664 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-825ww_7586db4a-cf0c-40b1-b014-77677f118219/nmstate-console-plugin/0.log" Oct 13 09:10:03 crc kubenswrapper[4664]: I1013 09:10:03.825885 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-khxbh_ee1d22c6-0909-4894-aff0-cc30dcbe54a5/nmstate-handler/0.log" Oct 13 09:10:03 crc kubenswrapper[4664]: I1013 09:10:03.956112 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-rncps_c1894568-58c9-473a-bbd8-484ed8a89a6d/nmstate-metrics/0.log" Oct 13 09:10:03 crc kubenswrapper[4664]: I1013 09:10:03.956724 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-rncps_c1894568-58c9-473a-bbd8-484ed8a89a6d/kube-rbac-proxy/0.log" Oct 13 09:10:04 crc kubenswrapper[4664]: I1013 09:10:04.122897 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-fsrhv_5e688f39-2130-46ae-8ca0-d1926e83ad24/nmstate-operator/0.log" Oct 13 09:10:04 crc kubenswrapper[4664]: I1013 09:10:04.188524 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-kwpkw_d5f7b13a-10cb-44b2-89b1-e434b8f81923/nmstate-webhook/0.log" Oct 13 09:10:04 crc kubenswrapper[4664]: I1013 09:10:04.918946 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:10:04 crc kubenswrapper[4664]: I1013 09:10:04.975160 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:10:05 crc kubenswrapper[4664]: I1013 09:10:05.155406 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.238025 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tcrzd" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="registry-server" containerID="cri-o://c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536" gracePeriod=2 Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.855017 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.947277 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content\") pod \"097f8408-d53b-4710-bf60-3d25a16b94ad\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.947334 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities\") pod \"097f8408-d53b-4710-bf60-3d25a16b94ad\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.947485 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgt96\" (UniqueName: \"kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96\") pod \"097f8408-d53b-4710-bf60-3d25a16b94ad\" (UID: \"097f8408-d53b-4710-bf60-3d25a16b94ad\") " Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.950595 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities" (OuterVolumeSpecName: "utilities") pod "097f8408-d53b-4710-bf60-3d25a16b94ad" (UID: "097f8408-d53b-4710-bf60-3d25a16b94ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:10:06 crc kubenswrapper[4664]: I1013 09:10:06.966124 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96" (OuterVolumeSpecName: "kube-api-access-rgt96") pod "097f8408-d53b-4710-bf60-3d25a16b94ad" (UID: "097f8408-d53b-4710-bf60-3d25a16b94ad"). InnerVolumeSpecName "kube-api-access-rgt96". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.039763 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "097f8408-d53b-4710-bf60-3d25a16b94ad" (UID: "097f8408-d53b-4710-bf60-3d25a16b94ad"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.049275 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.049467 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097f8408-d53b-4710-bf60-3d25a16b94ad-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.049553 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgt96\" (UniqueName: \"kubernetes.io/projected/097f8408-d53b-4710-bf60-3d25a16b94ad-kube-api-access-rgt96\") on node \"crc\" DevicePath \"\"" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.259848 4664 generic.go:334] "Generic (PLEG): container finished" podID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerID="c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536" exitCode=0 Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.259895 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerDied","Data":"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536"} Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.259923 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tcrzd" event={"ID":"097f8408-d53b-4710-bf60-3d25a16b94ad","Type":"ContainerDied","Data":"e6d6ceafd5cd7d239875c7862c4aa89713940d75ce4b79400af1dff4fd8e25ca"} Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.259928 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tcrzd" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.259941 4664 scope.go:117] "RemoveContainer" containerID="c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.298832 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.298947 4664 scope.go:117] "RemoveContainer" containerID="bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.314590 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tcrzd"] Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.366987 4664 scope.go:117] "RemoveContainer" containerID="5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.415095 4664 scope.go:117] "RemoveContainer" containerID="c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536" Oct 13 09:10:07 crc kubenswrapper[4664]: E1013 09:10:07.419854 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536\": container with ID starting with c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536 not found: ID does not exist" containerID="c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.419985 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536"} err="failed to get container status \"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536\": rpc error: code = NotFound desc = could not find container \"c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536\": container with ID starting with c4760e7c126bd5d8a09a94ec2a784b14aa246377872ff843dbb304bf2b5fa536 not found: ID does not exist" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.420078 4664 scope.go:117] "RemoveContainer" containerID="bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db" Oct 13 09:10:07 crc kubenswrapper[4664]: E1013 09:10:07.420560 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db\": container with ID starting with bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db not found: ID does not exist" containerID="bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.420598 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db"} err="failed to get container status \"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db\": rpc error: code = NotFound desc = could not find container \"bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db\": container with ID starting with bacff67b3ded1cd6772fbfc2412b171e747e0b30db3dfd95209c1bdc79aaf1db not found: ID does not exist" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.420624 4664 scope.go:117] "RemoveContainer" 
containerID="5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd" Oct 13 09:10:07 crc kubenswrapper[4664]: E1013 09:10:07.420992 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd\": container with ID starting with 5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd not found: ID does not exist" containerID="5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd" Oct 13 09:10:07 crc kubenswrapper[4664]: I1013 09:10:07.421085 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd"} err="failed to get container status \"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd\": rpc error: code = NotFound desc = could not find container \"5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd\": container with ID starting with 5f880145407bcb76b76406de3ab5ac0f59e433d469dda73815950fdfbb75adbd not found: ID does not exist" Oct 13 09:10:09 crc kubenswrapper[4664]: I1013 09:10:09.058071 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" path="/var/lib/kubelet/pods/097f8408-d53b-4710-bf60-3d25a16b94ad/volumes" Oct 13 09:10:18 crc kubenswrapper[4664]: I1013 09:10:18.960702 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-fhr4v_84ac3e13-afa3-4136-ba43-738b66c8e84e/controller/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.002551 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-fhr4v_84ac3e13-afa3-4136-ba43-738b66c8e84e/kube-rbac-proxy/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.147036 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-frr-files/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.333887 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-frr-files/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.369729 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-reloader/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.453396 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-metrics/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.509290 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-reloader/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.633220 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-frr-files/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.652824 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-metrics/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.697616 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-reloader/0.log" Oct 13 09:10:19 
crc kubenswrapper[4664]: I1013 09:10:19.786155 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-metrics/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.922232 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-frr-files/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.924378 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-metrics/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.961132 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/cp-reloader/0.log" Oct 13 09:10:19 crc kubenswrapper[4664]: I1013 09:10:19.991858 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/controller/1.log" Oct 13 09:10:20 crc kubenswrapper[4664]: I1013 09:10:20.186358 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/controller/0.log" Oct 13 09:10:20 crc kubenswrapper[4664]: I1013 09:10:20.378953 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/frr-metrics/0.log" Oct 13 09:10:20 crc kubenswrapper[4664]: I1013 09:10:20.617951 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/kube-rbac-proxy/0.log" Oct 13 09:10:20 crc kubenswrapper[4664]: I1013 09:10:20.666484 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/kube-rbac-proxy-frr/0.log" Oct 13 09:10:20 crc kubenswrapper[4664]: I1013 09:10:20.856999 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/reloader/0.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.059923 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/frr/1.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.230136 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-nf4cs_cc585f12-7a56-4e1e-a5a1-4a9ccedd4309/frr-k8s-webhook-server/0.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.233141 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-nf4cs_cc585f12-7a56-4e1e-a5a1-4a9ccedd4309/frr-k8s-webhook-server/1.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.562595 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-85599f4f6-7xvrk_c5b636b9-85bf-4ebc-8fea-04f2bc895d6a/manager/0.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.743860 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-594d989fdd-xrzvh_0daca0ae-d791-4eca-bdc5-8e5f598b8d85/webhook-server/1.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.788325 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-594d989fdd-xrzvh_0daca0ae-d791-4eca-bdc5-8e5f598b8d85/webhook-server/0.log" Oct 13 09:10:21 crc kubenswrapper[4664]: I1013 09:10:21.958954 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-7pjp7_9037bcaf-a327-4e60-acf8-978687eb88e9/frr/0.log" Oct 13 09:10:22 crc kubenswrapper[4664]: I1013 09:10:22.129149 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sngcf_54a84985-f353-4f50-aec1-d7b5501c1d2c/speaker/1.log" Oct 13 09:10:22 crc kubenswrapper[4664]: I1013 09:10:22.146365 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sngcf_54a84985-f353-4f50-aec1-d7b5501c1d2c/kube-rbac-proxy/0.log" Oct 13 09:10:22 crc kubenswrapper[4664]: I1013 09:10:22.459834 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-sngcf_54a84985-f353-4f50-aec1-d7b5501c1d2c/speaker/0.log" Oct 13 09:10:28 crc kubenswrapper[4664]: I1013 09:10:28.811642 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:10:28 crc kubenswrapper[4664]: I1013 09:10:28.812260 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:10:34 crc kubenswrapper[4664]: I1013 09:10:34.719581 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/util/0.log" Oct 13 09:10:34 crc kubenswrapper[4664]: I1013 09:10:34.973774 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/pull/0.log" Oct 13 09:10:34 crc kubenswrapper[4664]: I1013 09:10:34.984672 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/pull/0.log" Oct 13 09:10:34 crc kubenswrapper[4664]: I1013 09:10:34.988233 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/util/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.183375 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/extract/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.188527 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/pull/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.219955 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2c55m2_3821c37c-46e6-432f-9787-b6f422897dde/util/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.414244 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-utilities/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.589472 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-utilities/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.604064 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-content/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.649328 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-content/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.781965 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-content/0.log" Oct 13 09:10:35 crc kubenswrapper[4664]: I1013 09:10:35.785533 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/extract-utilities/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.082435 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-utilities/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.407844 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-content/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.443675 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-content/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.492951 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-utilities/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.766292 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-utilities/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.766533 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/extract-content/0.log" Oct 13 09:10:36 crc kubenswrapper[4664]: I1013 09:10:36.990454 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-bbwgq_eef97848-b083-4ac5-a9bd-5b8f047b420b/registry-server/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.079909 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/util/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.363686 4664 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/util/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.478539 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/pull/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.498823 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/pull/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.634457 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/util/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.768469 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/pull/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.806231 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5hrqb_3960f54e-4fe8-4198-adf1-3aea88880c0f/registry-server/0.log" Oct 13 09:10:37 crc kubenswrapper[4664]: I1013 09:10:37.825047 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835ccfrgz_645e6031-efcb-4cdd-8758-670729f16fd2/extract/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.015672 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-rhmxl_acb034b5-2645-458a-91ae-14c42b6632b2/marketplace-operator/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.125074 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-utilities/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.328143 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-utilities/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.338916 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-content/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.375423 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-content/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.553221 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-utilities/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.585772 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/extract-content/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.776200 4664 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/registry-server/1.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.887762 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5b7pw_41825a43-78e2-42f0-aec8-2778276d69d8/registry-server/0.log" Oct 13 09:10:38 crc kubenswrapper[4664]: I1013 09:10:38.932198 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-utilities/0.log" Oct 13 09:10:39 crc kubenswrapper[4664]: I1013 09:10:39.060054 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-utilities/0.log" Oct 13 09:10:39 crc kubenswrapper[4664]: I1013 09:10:39.083443 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-content/0.log" Oct 13 09:10:39 crc kubenswrapper[4664]: I1013 09:10:39.130471 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-content/0.log" Oct 13 09:10:39 crc kubenswrapper[4664]: I1013 09:10:39.290103 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-content/0.log" Oct 13 09:10:39 crc kubenswrapper[4664]: I1013 09:10:39.304752 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/extract-utilities/0.log" Oct 13 09:10:40 crc kubenswrapper[4664]: I1013 09:10:40.047318 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pt8xd_ddbcfb1e-6a5d-41b8-9bb2-7ad41be5faa8/registry-server/0.log" Oct 13 09:10:58 crc kubenswrapper[4664]: I1013 09:10:58.812391 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:10:58 crc kubenswrapper[4664]: I1013 09:10:58.812859 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:10:58 crc kubenswrapper[4664]: I1013 09:10:58.812905 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 09:10:58 crc kubenswrapper[4664]: I1013 09:10:58.813600 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 09:10:58 crc kubenswrapper[4664]: I1013 09:10:58.813645 4664 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4" gracePeriod=600 Oct 13 09:10:59 crc kubenswrapper[4664]: I1013 09:10:59.784361 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4" exitCode=0 Oct 13 09:10:59 crc kubenswrapper[4664]: I1013 09:10:59.784864 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4"} Oct 13 09:10:59 crc kubenswrapper[4664]: I1013 09:10:59.784902 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerStarted","Data":"9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19"} Oct 13 09:10:59 crc kubenswrapper[4664]: I1013 09:10:59.784926 4664 scope.go:117] "RemoveContainer" containerID="da1281e2f5844adb2d9a24f27b0228d47e184b9d317ccf4cedf312bf7069d426" Oct 13 09:13:04 crc kubenswrapper[4664]: I1013 09:13:04.087137 4664 generic.go:334] "Generic (PLEG): container finished" podID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerID="991b4a86b3acf612768cbf2ef25e25aa847979b4bdf8a06c457a6e34eb4c8150" exitCode=0 Oct 13 09:13:04 crc kubenswrapper[4664]: I1013 09:13:04.087219 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-2dg27/must-gather-xxjdz" event={"ID":"12962c73-4eee-44a6-8382-3c2bb82bb73f","Type":"ContainerDied","Data":"991b4a86b3acf612768cbf2ef25e25aa847979b4bdf8a06c457a6e34eb4c8150"} Oct 13 09:13:04 crc kubenswrapper[4664]: I1013 09:13:04.088624 4664 scope.go:117] "RemoveContainer" containerID="991b4a86b3acf612768cbf2ef25e25aa847979b4bdf8a06c457a6e34eb4c8150" Oct 13 09:13:05 crc kubenswrapper[4664]: I1013 09:13:05.171741 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-2dg27_must-gather-xxjdz_12962c73-4eee-44a6-8382-3c2bb82bb73f/gather/0.log" Oct 13 09:13:15 crc kubenswrapper[4664]: I1013 09:13:15.558837 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-2dg27/must-gather-xxjdz"] Oct 13 09:13:15 crc kubenswrapper[4664]: I1013 09:13:15.560171 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-2dg27/must-gather-xxjdz" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="copy" containerID="cri-o://4c9a5831506c57e5a6da7836c9fd17859c8fd672fb21ed83c6f3bd1f97d7d167" gracePeriod=2 Oct 13 09:13:15 crc kubenswrapper[4664]: I1013 09:13:15.568148 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-2dg27/must-gather-xxjdz"] Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.257122 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-2dg27_must-gather-xxjdz_12962c73-4eee-44a6-8382-3c2bb82bb73f/copy/0.log" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.258492 4664 generic.go:334] "Generic (PLEG): container finished" podID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerID="4c9a5831506c57e5a6da7836c9fd17859c8fd672fb21ed83c6f3bd1f97d7d167" exitCode=143 Oct 13 09:13:16 crc 
kubenswrapper[4664]: I1013 09:13:16.397743 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-2dg27_must-gather-xxjdz_12962c73-4eee-44a6-8382-3c2bb82bb73f/copy/0.log" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.399204 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.556658 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output\") pod \"12962c73-4eee-44a6-8382-3c2bb82bb73f\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.557463 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcnj8\" (UniqueName: \"kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8\") pod \"12962c73-4eee-44a6-8382-3c2bb82bb73f\" (UID: \"12962c73-4eee-44a6-8382-3c2bb82bb73f\") " Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.573116 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8" (OuterVolumeSpecName: "kube-api-access-bcnj8") pod "12962c73-4eee-44a6-8382-3c2bb82bb73f" (UID: "12962c73-4eee-44a6-8382-3c2bb82bb73f"). InnerVolumeSpecName "kube-api-access-bcnj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.661116 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcnj8\" (UniqueName: \"kubernetes.io/projected/12962c73-4eee-44a6-8382-3c2bb82bb73f-kube-api-access-bcnj8\") on node \"crc\" DevicePath \"\"" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.698550 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "12962c73-4eee-44a6-8382-3c2bb82bb73f" (UID: "12962c73-4eee-44a6-8382-3c2bb82bb73f"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:13:16 crc kubenswrapper[4664]: I1013 09:13:16.762521 4664 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/12962c73-4eee-44a6-8382-3c2bb82bb73f-must-gather-output\") on node \"crc\" DevicePath \"\"" Oct 13 09:13:17 crc kubenswrapper[4664]: I1013 09:13:17.073921 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" path="/var/lib/kubelet/pods/12962c73-4eee-44a6-8382-3c2bb82bb73f/volumes" Oct 13 09:13:17 crc kubenswrapper[4664]: I1013 09:13:17.272007 4664 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-2dg27_must-gather-xxjdz_12962c73-4eee-44a6-8382-3c2bb82bb73f/copy/0.log" Oct 13 09:13:17 crc kubenswrapper[4664]: I1013 09:13:17.272414 4664 scope.go:117] "RemoveContainer" containerID="4c9a5831506c57e5a6da7836c9fd17859c8fd672fb21ed83c6f3bd1f97d7d167" Oct 13 09:13:17 crc kubenswrapper[4664]: I1013 09:13:17.272505 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-2dg27/must-gather-xxjdz" Oct 13 09:13:17 crc kubenswrapper[4664]: I1013 09:13:17.294688 4664 scope.go:117] "RemoveContainer" containerID="991b4a86b3acf612768cbf2ef25e25aa847979b4bdf8a06c457a6e34eb4c8150" Oct 13 09:13:28 crc kubenswrapper[4664]: I1013 09:13:28.811756 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:13:28 crc kubenswrapper[4664]: I1013 09:13:28.812391 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:13:58 crc kubenswrapper[4664]: I1013 09:13:58.812382 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:13:58 crc kubenswrapper[4664]: I1013 09:13:58.813185 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:14:28 crc kubenswrapper[4664]: I1013 09:14:28.812380 4664 patch_prober.go:28] interesting pod/machine-config-daemon-hkzpl container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 13 09:14:28 crc kubenswrapper[4664]: I1013 09:14:28.812924 4664 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 13 09:14:28 crc kubenswrapper[4664]: I1013 09:14:28.812974 4664 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" Oct 13 09:14:28 crc kubenswrapper[4664]: I1013 09:14:28.813671 4664 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19"} pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 13 09:14:28 crc kubenswrapper[4664]: I1013 09:14:28.813716 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerName="machine-config-daemon" containerID="cri-o://9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" gracePeriod=600 Oct 13 09:14:28 crc 
kubenswrapper[4664]: E1013 09:14:28.943009 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:14:29 crc kubenswrapper[4664]: I1013 09:14:29.089726 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" event={"ID":"35504ef1-729c-4404-bd49-0d82bf23ccbb","Type":"ContainerDied","Data":"9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19"} Oct 13 09:14:29 crc kubenswrapper[4664]: I1013 09:14:29.089804 4664 scope.go:117] "RemoveContainer" containerID="63a6404a345bd57e47c943aa2b879c928b3c2e3a2894752fb10d2393a45030b4" Oct 13 09:14:29 crc kubenswrapper[4664]: I1013 09:14:29.090564 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:14:29 crc kubenswrapper[4664]: I1013 09:14:29.089618 4664 generic.go:334] "Generic (PLEG): container finished" podID="35504ef1-729c-4404-bd49-0d82bf23ccbb" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" exitCode=0 Oct 13 09:14:29 crc kubenswrapper[4664]: E1013 09:14:29.090937 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:14:41 crc kubenswrapper[4664]: I1013 09:14:41.047651 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:14:41 crc kubenswrapper[4664]: E1013 09:14:41.048969 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.921974 4664 scope.go:117] "RemoveContainer" containerID="5f3f818f54c22f59a1df11e01685f32cea3cb231ca2bf6d89129e67c7931a7f7" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.970747 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:14:44 crc kubenswrapper[4664]: E1013 09:14:44.975016 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="gather" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.975070 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="gather" Oct 13 09:14:44 crc kubenswrapper[4664]: E1013 09:14:44.975121 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="registry-server" Oct 13 09:14:44 crc 
kubenswrapper[4664]: I1013 09:14:44.975136 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="registry-server" Oct 13 09:14:44 crc kubenswrapper[4664]: E1013 09:14:44.975159 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="copy" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.975172 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="copy" Oct 13 09:14:44 crc kubenswrapper[4664]: E1013 09:14:44.975202 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="extract-content" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.975218 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="extract-content" Oct 13 09:14:44 crc kubenswrapper[4664]: E1013 09:14:44.975244 4664 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="extract-utilities" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.975346 4664 state_mem.go:107] "Deleted CPUSet assignment" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="extract-utilities" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.976524 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="copy" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.976588 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962c73-4eee-44a6-8382-3c2bb82bb73f" containerName="gather" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.976627 4664 memory_manager.go:354] "RemoveStaleState removing state" podUID="097f8408-d53b-4710-bf60-3d25a16b94ad" containerName="registry-server" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.993579 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:44 crc kubenswrapper[4664]: I1013 09:14:44.997849 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.033770 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl4fn\" (UniqueName: \"kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.034286 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.034366 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.136941 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl4fn\" (UniqueName: \"kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.137743 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.137820 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.137993 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.138269 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.166967 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pl4fn\" (UniqueName: \"kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn\") pod \"redhat-operators-tgkbg\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.317471 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:45 crc kubenswrapper[4664]: I1013 09:14:45.939519 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:14:45 crc kubenswrapper[4664]: W1013 09:14:45.948012 4664 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b0174de_e143_42fc_a516_92ac379ecd1f.slice/crio-27f6c681367f605808730e58ae22a2edf142b4a459408d7bdd661e25447b9a4d WatchSource:0}: Error finding container 27f6c681367f605808730e58ae22a2edf142b4a459408d7bdd661e25447b9a4d: Status 404 returned error can't find the container with id 27f6c681367f605808730e58ae22a2edf142b4a459408d7bdd661e25447b9a4d Oct 13 09:14:46 crc kubenswrapper[4664]: I1013 09:14:46.334943 4664 generic.go:334] "Generic (PLEG): container finished" podID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerID="0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3" exitCode=0 Oct 13 09:14:46 crc kubenswrapper[4664]: I1013 09:14:46.334992 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerDied","Data":"0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3"} Oct 13 09:14:46 crc kubenswrapper[4664]: I1013 09:14:46.335304 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerStarted","Data":"27f6c681367f605808730e58ae22a2edf142b4a459408d7bdd661e25447b9a4d"} Oct 13 09:14:46 crc kubenswrapper[4664]: I1013 09:14:46.339965 4664 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 13 09:14:48 crc kubenswrapper[4664]: I1013 09:14:48.370255 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerStarted","Data":"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094"} Oct 13 09:14:51 crc kubenswrapper[4664]: I1013 09:14:51.405499 4664 generic.go:334] "Generic (PLEG): container finished" podID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerID="7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094" exitCode=0 Oct 13 09:14:51 crc kubenswrapper[4664]: I1013 09:14:51.405887 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerDied","Data":"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094"} Oct 13 09:14:52 crc kubenswrapper[4664]: I1013 09:14:52.419421 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerStarted","Data":"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131"} Oct 13 09:14:52 crc kubenswrapper[4664]: I1013 09:14:52.445051 4664 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-marketplace/redhat-operators-tgkbg" podStartSLOduration=2.939059875 podStartE2EDuration="8.445026626s" podCreationTimestamp="2025-10-13 09:14:44 +0000 UTC" firstStartedPulling="2025-10-13 09:14:46.337150693 +0000 UTC m=+8894.024595885" lastFinishedPulling="2025-10-13 09:14:51.843117434 +0000 UTC m=+8899.530562636" observedRunningTime="2025-10-13 09:14:52.43732614 +0000 UTC m=+8900.124771352" watchObservedRunningTime="2025-10-13 09:14:52.445026626 +0000 UTC m=+8900.132471818" Oct 13 09:14:55 crc kubenswrapper[4664]: I1013 09:14:55.046955 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:14:55 crc kubenswrapper[4664]: E1013 09:14:55.047525 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:14:55 crc kubenswrapper[4664]: I1013 09:14:55.320534 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:55 crc kubenswrapper[4664]: I1013 09:14:55.321006 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:14:56 crc kubenswrapper[4664]: I1013 09:14:56.380829 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tgkbg" podUID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerName="registry-server" probeResult="failure" output=< Oct 13 09:14:56 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:14:56 crc kubenswrapper[4664]: > Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.216679 4664 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr"] Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.220533 4664 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.235050 4664 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.235055 4664 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.248527 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr"] Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.348817 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.348953 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfld4\" (UniqueName: \"kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.348979 4664 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.450109 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfld4\" (UniqueName: \"kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.450164 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.450247 4664 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.451817 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume\") pod 
\"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.469328 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.475051 4664 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfld4\" (UniqueName: \"kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4\") pod \"collect-profiles-29339115-ljngr\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:00 crc kubenswrapper[4664]: I1013 09:15:00.561247 4664 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:01 crc kubenswrapper[4664]: I1013 09:15:01.203990 4664 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr"] Oct 13 09:15:01 crc kubenswrapper[4664]: I1013 09:15:01.506393 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" event={"ID":"9006f1ea-0dac-4e6d-b665-7afa8bfdc066","Type":"ContainerStarted","Data":"e70a3c0b4d54f15d953df3b335a1028532f17ad58fe7ac08f2e3265714c0d491"} Oct 13 09:15:01 crc kubenswrapper[4664]: I1013 09:15:01.506736 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" event={"ID":"9006f1ea-0dac-4e6d-b665-7afa8bfdc066","Type":"ContainerStarted","Data":"80d91193eff215672bb3cd104d3b733dd6fd7f04e6988feab3db60623edf79d3"} Oct 13 09:15:01 crc kubenswrapper[4664]: I1013 09:15:01.526218 4664 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" podStartSLOduration=1.526196273 podStartE2EDuration="1.526196273s" podCreationTimestamp="2025-10-13 09:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-13 09:15:01.525150066 +0000 UTC m=+8909.212595258" watchObservedRunningTime="2025-10-13 09:15:01.526196273 +0000 UTC m=+8909.213641485" Oct 13 09:15:02 crc kubenswrapper[4664]: I1013 09:15:02.521252 4664 generic.go:334] "Generic (PLEG): container finished" podID="9006f1ea-0dac-4e6d-b665-7afa8bfdc066" containerID="e70a3c0b4d54f15d953df3b335a1028532f17ad58fe7ac08f2e3265714c0d491" exitCode=0 Oct 13 09:15:02 crc kubenswrapper[4664]: I1013 09:15:02.521322 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" event={"ID":"9006f1ea-0dac-4e6d-b665-7afa8bfdc066","Type":"ContainerDied","Data":"e70a3c0b4d54f15d953df3b335a1028532f17ad58fe7ac08f2e3265714c0d491"} Oct 13 09:15:03 crc kubenswrapper[4664]: I1013 09:15:03.897174 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.033021 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume\") pod \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.033093 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfld4\" (UniqueName: \"kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4\") pod \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.033227 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume\") pod \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\" (UID: \"9006f1ea-0dac-4e6d-b665-7afa8bfdc066\") " Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.034033 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume" (OuterVolumeSpecName: "config-volume") pod "9006f1ea-0dac-4e6d-b665-7afa8bfdc066" (UID: "9006f1ea-0dac-4e6d-b665-7afa8bfdc066"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.041708 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4" (OuterVolumeSpecName: "kube-api-access-kfld4") pod "9006f1ea-0dac-4e6d-b665-7afa8bfdc066" (UID: "9006f1ea-0dac-4e6d-b665-7afa8bfdc066"). InnerVolumeSpecName "kube-api-access-kfld4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.047291 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9006f1ea-0dac-4e6d-b665-7afa8bfdc066" (UID: "9006f1ea-0dac-4e6d-b665-7afa8bfdc066"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.138445 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfld4\" (UniqueName: \"kubernetes.io/projected/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-kube-api-access-kfld4\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.138661 4664 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-config-volume\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.138739 4664 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9006f1ea-0dac-4e6d-b665-7afa8bfdc066-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.546468 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" event={"ID":"9006f1ea-0dac-4e6d-b665-7afa8bfdc066","Type":"ContainerDied","Data":"80d91193eff215672bb3cd104d3b733dd6fd7f04e6988feab3db60623edf79d3"} Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.546745 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29339115-ljngr" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.546882 4664 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80d91193eff215672bb3cd104d3b733dd6fd7f04e6988feab3db60623edf79d3" Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.633928 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5"] Oct 13 09:15:04 crc kubenswrapper[4664]: I1013 09:15:04.640219 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29339070-9cwv5"] Oct 13 09:15:05 crc kubenswrapper[4664]: I1013 09:15:05.060966 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6519e97-c4b7-46bf-9489-ad844d762979" path="/var/lib/kubelet/pods/d6519e97-c4b7-46bf-9489-ad844d762979/volumes" Oct 13 09:15:06 crc kubenswrapper[4664]: I1013 09:15:06.363844 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tgkbg" podUID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerName="registry-server" probeResult="failure" output=< Oct 13 09:15:06 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:15:06 crc kubenswrapper[4664]: > Oct 13 09:15:07 crc kubenswrapper[4664]: I1013 09:15:07.046920 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:15:07 crc kubenswrapper[4664]: E1013 09:15:07.047203 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:15:16 crc kubenswrapper[4664]: I1013 09:15:16.363262 4664 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tgkbg" 
podUID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerName="registry-server" probeResult="failure" output=< Oct 13 09:15:16 crc kubenswrapper[4664]: timeout: failed to connect service ":50051" within 1s Oct 13 09:15:16 crc kubenswrapper[4664]: > Oct 13 09:15:18 crc kubenswrapper[4664]: I1013 09:15:18.047726 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:15:18 crc kubenswrapper[4664]: E1013 09:15:18.049512 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:15:25 crc kubenswrapper[4664]: I1013 09:15:25.371180 4664 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:15:25 crc kubenswrapper[4664]: I1013 09:15:25.437544 4664 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:15:25 crc kubenswrapper[4664]: I1013 09:15:25.615801 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:15:26 crc kubenswrapper[4664]: I1013 09:15:26.772973 4664 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tgkbg" podUID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerName="registry-server" containerID="cri-o://8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131" gracePeriod=2 Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.308221 4664 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.434200 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl4fn\" (UniqueName: \"kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn\") pod \"9b0174de-e143-42fc-a516-92ac379ecd1f\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.434276 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content\") pod \"9b0174de-e143-42fc-a516-92ac379ecd1f\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.434370 4664 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities\") pod \"9b0174de-e143-42fc-a516-92ac379ecd1f\" (UID: \"9b0174de-e143-42fc-a516-92ac379ecd1f\") " Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.435400 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities" (OuterVolumeSpecName: "utilities") pod "9b0174de-e143-42fc-a516-92ac379ecd1f" (UID: "9b0174de-e143-42fc-a516-92ac379ecd1f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.443210 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn" (OuterVolumeSpecName: "kube-api-access-pl4fn") pod "9b0174de-e143-42fc-a516-92ac379ecd1f" (UID: "9b0174de-e143-42fc-a516-92ac379ecd1f"). InnerVolumeSpecName "kube-api-access-pl4fn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.503624 4664 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b0174de-e143-42fc-a516-92ac379ecd1f" (UID: "9b0174de-e143-42fc-a516-92ac379ecd1f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.536383 4664 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl4fn\" (UniqueName: \"kubernetes.io/projected/9b0174de-e143-42fc-a516-92ac379ecd1f-kube-api-access-pl4fn\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.536410 4664 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.536419 4664 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b0174de-e143-42fc-a516-92ac379ecd1f-utilities\") on node \"crc\" DevicePath \"\"" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.789333 4664 generic.go:334] "Generic (PLEG): container finished" podID="9b0174de-e143-42fc-a516-92ac379ecd1f" containerID="8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131" exitCode=0 Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.789399 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerDied","Data":"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131"} Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.789466 4664 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tgkbg" event={"ID":"9b0174de-e143-42fc-a516-92ac379ecd1f","Type":"ContainerDied","Data":"27f6c681367f605808730e58ae22a2edf142b4a459408d7bdd661e25447b9a4d"} Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.789470 4664 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tgkbg" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.789526 4664 scope.go:117] "RemoveContainer" containerID="8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.831005 4664 scope.go:117] "RemoveContainer" containerID="7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.856507 4664 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.868773 4664 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tgkbg"] Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.875304 4664 scope.go:117] "RemoveContainer" containerID="0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.922562 4664 scope.go:117] "RemoveContainer" containerID="8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131" Oct 13 09:15:27 crc kubenswrapper[4664]: E1013 09:15:27.923052 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131\": container with ID starting with 8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131 not found: ID does not exist" containerID="8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.923115 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131"} err="failed to get container status \"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131\": rpc error: code = NotFound desc = could not find container \"8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131\": container with ID starting with 8856700c65132e13f25a806bfc7829b8077c931c1254a2f944e1c320394aa131 not found: ID does not exist" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.923166 4664 scope.go:117] "RemoveContainer" containerID="7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094" Oct 13 09:15:27 crc kubenswrapper[4664]: E1013 09:15:27.924067 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094\": container with ID starting with 7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094 not found: ID does not exist" containerID="7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.924100 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094"} err="failed to get container status \"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094\": rpc error: code = NotFound desc = could not find container \"7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094\": container with ID starting with 7d8eacb173ef27455f7da2c4f89a8812c491788c14e8576b7ac5c3ecad172094 not found: ID does not exist" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.924122 4664 scope.go:117] "RemoveContainer" 
containerID="0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3" Oct 13 09:15:27 crc kubenswrapper[4664]: E1013 09:15:27.924453 4664 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3\": container with ID starting with 0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3 not found: ID does not exist" containerID="0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3" Oct 13 09:15:27 crc kubenswrapper[4664]: I1013 09:15:27.924500 4664 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3"} err="failed to get container status \"0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3\": rpc error: code = NotFound desc = could not find container \"0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3\": container with ID starting with 0abbc01dab62e38023cd41e6be07fbb498402a0f8765b0ca5c559eb922ce25f3 not found: ID does not exist" Oct 13 09:15:29 crc kubenswrapper[4664]: I1013 09:15:29.065375 4664 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b0174de-e143-42fc-a516-92ac379ecd1f" path="/var/lib/kubelet/pods/9b0174de-e143-42fc-a516-92ac379ecd1f/volumes" Oct 13 09:15:30 crc kubenswrapper[4664]: I1013 09:15:30.047142 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:15:30 crc kubenswrapper[4664]: E1013 09:15:30.047562 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:15:43 crc kubenswrapper[4664]: I1013 09:15:43.053324 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:15:43 crc kubenswrapper[4664]: E1013 09:15:43.054047 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:15:44 crc kubenswrapper[4664]: I1013 09:15:44.995913 4664 scope.go:117] "RemoveContainer" containerID="f29e57bd6ed7c118ce3e25808a1d4d084ebf3f6d978f52cdf09c32bd6fcfa656" Oct 13 09:15:55 crc kubenswrapper[4664]: I1013 09:15:55.050487 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:15:55 crc kubenswrapper[4664]: E1013 09:15:55.051258 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" 
podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:16:10 crc kubenswrapper[4664]: I1013 09:16:10.047390 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:16:10 crc kubenswrapper[4664]: E1013 09:16:10.048595 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:16:24 crc kubenswrapper[4664]: I1013 09:16:24.047839 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:16:24 crc kubenswrapper[4664]: E1013 09:16:24.049113 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" Oct 13 09:16:38 crc kubenswrapper[4664]: I1013 09:16:38.047477 4664 scope.go:117] "RemoveContainer" containerID="9835b73c305ce0f8f05ed355df45eb87833ba78ec9efb325b93639d750ebfc19" Oct 13 09:16:38 crc kubenswrapper[4664]: E1013 09:16:38.048326 4664 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hkzpl_openshift-machine-config-operator(35504ef1-729c-4404-bd49-0d82bf23ccbb)\"" pod="openshift-machine-config-operator/machine-config-daemon-hkzpl" podUID="35504ef1-729c-4404-bd49-0d82bf23ccbb" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515073141603024445 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015073141604017363 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015073117477016521 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015073117477015471 5ustar corecore